linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
   1/* bnx2x_cmn.c: QLogic Everest network driver.
   2 *
   3 * Copyright (c) 2007-2013 Broadcom Corporation
   4 * Copyright (c) 2014 QLogic Corporation
   5 * All rights reserved
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation.
  10 *
  11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  12 * Written by: Eliezer Tamir
  13 * Based on code from Michael Chan's bnx2 driver
  14 * UDP CSUM errata workaround by Arik Gendelman
  15 * Slowpath and fastpath rework by Vladislav Zolotarov
  16 * Statistics and Link management by Yitchak Gertner
  17 *
  18 */
  19
  20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21
  22#include <linux/etherdevice.h>
  23#include <linux/if_vlan.h>
  24#include <linux/interrupt.h>
  25#include <linux/ip.h>
  26#include <linux/crash_dump.h>
  27#include <net/tcp.h>
  28#include <net/ipv6.h>
  29#include <net/ip6_checksum.h>
  30#include <net/busy_poll.h>
  31#include <linux/prefetch.h>
  32#include "bnx2x_cmn.h"
  33#include "bnx2x_init.h"
  34#include "bnx2x_sp.h"
  35
  36static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
  37static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
  38static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
  39static int bnx2x_poll(struct napi_struct *napi, int budget);
  40
  41static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
  42{
  43        int i;
  44
  45        /* Add NAPI objects */
  46        for_each_rx_queue_cnic(bp, i) {
  47                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  48                               bnx2x_poll, NAPI_POLL_WEIGHT);
  49        }
  50}
  51
  52static void bnx2x_add_all_napi(struct bnx2x *bp)
  53{
  54        int i;
  55
  56        /* Add NAPI objects */
  57        for_each_eth_queue(bp, i) {
  58                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  59                               bnx2x_poll, NAPI_POLL_WEIGHT);
  60        }
  61}
  62
  63static int bnx2x_calc_num_queues(struct bnx2x *bp)
  64{
  65        int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
  66
  67        /* Reduce memory usage in kdump environment by using only one queue */
  68        if (is_kdump_kernel())
  69                nq = 1;
  70
  71        nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
  72        return nq;
  73}
  74
  75/**
  76 * bnx2x_move_fp - move content of the fastpath structure.
  77 *
  78 * @bp:         driver handle
  79 * @from:       source FP index
  80 * @to:         destination FP index
  81 *
   82 * Makes sure the contents of the bp->fp[to].napi are kept
   83 * intact. This is done by first copying the napi struct from
   84 * the target to the source, and then memcpying the entire
   85 * source onto the target. Updates txdata pointers and related
   86 * content.
  87 */
  88static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
  89{
  90        struct bnx2x_fastpath *from_fp = &bp->fp[from];
  91        struct bnx2x_fastpath *to_fp = &bp->fp[to];
  92        struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
  93        struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
  94        struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
  95        struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
  96        int old_max_eth_txqs, new_max_eth_txqs;
  97        int old_txdata_index = 0, new_txdata_index = 0;
  98        struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
  99
 100        /* Copy the NAPI object as it has been already initialized */
 101        from_fp->napi = to_fp->napi;
 102
 103        /* Move bnx2x_fastpath contents */
 104        memcpy(to_fp, from_fp, sizeof(*to_fp));
 105        to_fp->index = to;
 106
 107        /* Retain the tpa_info of the original `to' version as we don't want
 108         * 2 FPs to contain the same tpa_info pointer.
 109         */
 110        to_fp->tpa_info = old_tpa_info;
 111
 112        /* move sp_objs contents as well, as their indices match fp ones */
 113        memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
 114
 115        /* move fp_stats contents as well, as their indices match fp ones */
 116        memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
 117
 118        /* Update txdata pointers in fp and move txdata content accordingly:
 119         * Each fp consumes 'max_cos' txdata structures, so the index should be
 120         * decremented by max_cos x delta.
 121         */
 122
 123        old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
 124        new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
 125                                (bp)->max_cos;
 126        if (from == FCOE_IDX(bp)) {
 127                old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
 128                new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
 129        }
 130
 131        memcpy(&bp->bnx2x_txq[new_txdata_index],
 132               &bp->bnx2x_txq[old_txdata_index],
 133               sizeof(struct bnx2x_fp_txdata));
 134        to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
 135}
 136
 137/**
 138 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 139 *
 140 * @bp:        driver handle
 141 * @buf:       character buffer to fill with the fw name
 142 * @buf_len:   length of the above buffer
 143 *
 144 */
 145void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
 146{
 147        if (IS_PF(bp)) {
 148                u8 phy_fw_ver[PHY_FW_VER_LEN];
 149
 150                phy_fw_ver[0] = '\0';
 151                bnx2x_get_ext_phy_fw_version(&bp->link_params,
 152                                             phy_fw_ver, PHY_FW_VER_LEN);
 153                strlcpy(buf, bp->fw_ver, buf_len);
 154                snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
 155                         "bc %d.%d.%d%s%s",
 156                         (bp->common.bc_ver & 0xff0000) >> 16,
 157                         (bp->common.bc_ver & 0xff00) >> 8,
 158                         (bp->common.bc_ver & 0xff),
 159                         ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
 160        } else {
 161                bnx2x_vf_fill_fw_str(bp, buf, buf_len);
 162        }
 163}
 164
 165/**
 166 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 167 *
 168 * @bp: driver handle
 169 * @delta:      number of eth queues which were not allocated
 170 */
 171static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
 172{
 173        int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
 174
 175        /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
  176         * backward along the array could cause memory to be overwritten
 177         */
 178        for (cos = 1; cos < bp->max_cos; cos++) {
 179                for (i = 0; i < old_eth_num - delta; i++) {
 180                        struct bnx2x_fastpath *fp = &bp->fp[i];
 181                        int new_idx = cos * (old_eth_num - delta) + i;
 182
 183                        memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
 184                               sizeof(struct bnx2x_fp_txdata));
 185                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
 186                }
 187        }
 188}
 189
 190int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 191
 192/* free skb in the packet ring at pos idx
 193 * return idx of last bd freed
 194 */
 195static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 196                             u16 idx, unsigned int *pkts_compl,
 197                             unsigned int *bytes_compl)
 198{
 199        struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
 200        struct eth_tx_start_bd *tx_start_bd;
 201        struct eth_tx_bd *tx_data_bd;
 202        struct sk_buff *skb = tx_buf->skb;
 203        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 204        int nbd;
 205        u16 split_bd_len = 0;
 206
  207        /* prefetch skb end pointer to speed up dev_kfree_skb() */
 208        prefetch(&skb->end);
 209
 210        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
 211           txdata->txq_index, idx, tx_buf, skb);
 212
 213        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
 214
 215        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 216#ifdef BNX2X_STOP_ON_ERROR
 217        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
 218                BNX2X_ERR("BAD nbd!\n");
 219                bnx2x_panic();
 220        }
 221#endif
 222        new_cons = nbd + tx_buf->first_bd;
 223
 224        /* Get the next bd */
 225        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 226
 227        /* Skip a parse bd... */
 228        --nbd;
 229        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 230
 231        if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
 232                /* Skip second parse bd... */
 233                --nbd;
 234                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 235        }
 236
 237        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 238        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 239                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 240                split_bd_len = BD_UNMAP_LEN(tx_data_bd);
 241                --nbd;
 242                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 243        }
 244
 245        /* unmap first bd */
 246        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 247                         BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
 248                         DMA_TO_DEVICE);
 249
 250        /* now free frags */
 251        while (nbd > 0) {
 252
 253                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 254                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
 255                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 256                if (--nbd)
 257                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 258        }
 259
 260        /* release skb */
 261        WARN_ON(!skb);
 262        if (likely(skb)) {
 263                (*pkts_compl)++;
 264                (*bytes_compl) += skb->len;
 265                dev_kfree_skb_any(skb);
 266        }
 267
 268        tx_buf->first_bd = 0;
 269        tx_buf->skb = NULL;
 270
 271        return new_cons;
 272}
 273
 274int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 275{
 276        struct netdev_queue *txq;
 277        u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
 278        unsigned int pkts_compl = 0, bytes_compl = 0;
 279
 280#ifdef BNX2X_STOP_ON_ERROR
 281        if (unlikely(bp->panic))
 282                return -1;
 283#endif
 284
 285        txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 286        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
 287        sw_cons = txdata->tx_pkt_cons;
 288
 289        while (sw_cons != hw_cons) {
 290                u16 pkt_cons;
 291
 292                pkt_cons = TX_BD(sw_cons);
 293
 294                DP(NETIF_MSG_TX_DONE,
 295                   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
 296                   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 297
 298                bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
 299                                            &pkts_compl, &bytes_compl);
 300
 301                sw_cons++;
 302        }
 303
 304        netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
 305
 306        txdata->tx_pkt_cons = sw_cons;
 307        txdata->tx_bd_cons = bd_cons;
 308
 309        /* Need to make the tx_bd_cons update visible to start_xmit()
 310         * before checking for netif_tx_queue_stopped().  Without the
 311         * memory barrier, there is a small possibility that
 312         * start_xmit() will miss it and cause the queue to be stopped
 313         * forever.
 314         * On the other hand we need an rmb() here to ensure the proper
 315         * ordering of bit testing in the following
 316         * netif_tx_queue_stopped(txq) call.
 317         */
 318        smp_mb();
 319
 320        if (unlikely(netif_tx_queue_stopped(txq))) {
 321                /* Taking tx_lock() is needed to prevent re-enabling the queue
  322                 * while it's empty. This could have happened if rx_action() gets
 323                 * suspended in bnx2x_tx_int() after the condition before
 324                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
 325                 *
 326                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
 327                 * sends some packets consuming the whole queue again->
 328                 * stops the queue
 329                 */
 330
 331                __netif_tx_lock(txq, smp_processor_id());
 332
 333                if ((netif_tx_queue_stopped(txq)) &&
 334                    (bp->state == BNX2X_STATE_OPEN) &&
 335                    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
 336                        netif_tx_wake_queue(txq);
 337
 338                __netif_tx_unlock(txq);
 339        }
 340        return 0;
 341}
 342
 343static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
 344                                             u16 idx)
 345{
 346        u16 last_max = fp->last_max_sge;
 347
 348        if (SUB_S16(idx, last_max) > 0)
 349                fp->last_max_sge = idx;
 350}
 351
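/* Example of the wrap-safe comparison in bnx2x_update_last_max_sge() above,
 * assuming SUB_S16() is the plain signed 16-bit subtraction helper from
 * bnx2x_cmn.h: with last_max = 0xfffe and idx = 0x0001,
 * SUB_S16(idx, last_max) evaluates to +3, so idx is still treated as the
 * newer index even though the raw unsigned value has wrapped around.
 */
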
 352static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 353                                         u16 sge_len,
 354                                         struct eth_end_agg_rx_cqe *cqe)
 355{
 356        struct bnx2x *bp = fp->bp;
 357        u16 last_max, last_elem, first_elem;
 358        u16 delta = 0;
 359        u16 i;
 360
 361        if (!sge_len)
 362                return;
 363
 364        /* First mark all used pages */
 365        for (i = 0; i < sge_len; i++)
 366                BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 367                        RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
 368
 369        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 370           sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 371
 372        /* Here we assume that the last SGE index is the biggest */
 373        prefetch((void *)(fp->sge_mask));
 374        bnx2x_update_last_max_sge(fp,
 375                le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 376
 377        last_max = RX_SGE(fp->last_max_sge);
 378        last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
 379        first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
 380
 381        /* If ring is not full */
 382        if (last_elem + 1 != first_elem)
 383                last_elem++;
 384
 385        /* Now update the prod */
 386        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
 387                if (likely(fp->sge_mask[i]))
 388                        break;
 389
 390                fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
 391                delta += BIT_VEC64_ELEM_SZ;
 392        }
 393
 394        if (delta > 0) {
 395                fp->rx_sge_prod += delta;
 396                /* clear page-end entries */
 397                bnx2x_clear_sge_mask_next_elems(fp);
 398        }
 399
 400        DP(NETIF_MSG_RX_STATUS,
 401           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
 402           fp->last_max_sge, fp->rx_sge_prod);
 403}
 404
  405/* Get the Toeplitz hash value for the skb from the
  406 * CQE (calculated by HW).
 407 */
 408static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
 409                            const struct eth_fast_path_rx_cqe *cqe,
 410                            enum pkt_hash_types *rxhash_type)
 411{
 412        /* Get Toeplitz hash from CQE */
 413        if ((bp->dev->features & NETIF_F_RXHASH) &&
 414            (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
 415                enum eth_rss_hash_type htype;
 416
 417                htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
 418                *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
 419                                (htype == TCP_IPV6_HASH_TYPE)) ?
 420                               PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
 421
 422                return le32_to_cpu(cqe->rss_hash_result);
 423        }
 424        *rxhash_type = PKT_HASH_TYPE_NONE;
 425        return 0;
 426}
 427
 428static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 429                            u16 cons, u16 prod,
 430                            struct eth_fast_path_rx_cqe *cqe)
 431{
 432        struct bnx2x *bp = fp->bp;
 433        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 434        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 435        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 436        dma_addr_t mapping;
 437        struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 438        struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 439
 440        /* print error if current state != stop */
 441        if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 442                BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 443
 444        /* Try to map an empty data buffer from the aggregation info  */
 445        mapping = dma_map_single(&bp->pdev->dev,
 446                                 first_buf->data + NET_SKB_PAD,
 447                                 fp->rx_buf_size, DMA_FROM_DEVICE);
 448        /*
 449         *  ...if it fails - move the skb from the consumer to the producer
 450         *  and set the current aggregation state as ERROR to drop it
 451         *  when TPA_STOP arrives.
 452         */
 453
 454        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 455                /* Move the BD from the consumer to the producer */
 456                bnx2x_reuse_rx_data(fp, cons, prod);
 457                tpa_info->tpa_state = BNX2X_TPA_ERROR;
 458                return;
 459        }
 460
 461        /* move empty data from pool to prod */
 462        prod_rx_buf->data = first_buf->data;
 463        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 464        /* point prod_bd to new data */
 465        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 466        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 467
 468        /* move partial skb from cons to pool (don't unmap yet) */
 469        *first_buf = *cons_rx_buf;
 470
 471        /* mark bin state as START */
 472        tpa_info->parsing_flags =
 473                le16_to_cpu(cqe->pars_flags.flags);
 474        tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
 475        tpa_info->tpa_state = BNX2X_TPA_START;
 476        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 477        tpa_info->placement_offset = cqe->placement_offset;
 478        tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
 479        if (fp->mode == TPA_MODE_GRO) {
 480                u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
 481                tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
 482                tpa_info->gro_size = gro_size;
 483        }
 484
 485#ifdef BNX2X_STOP_ON_ERROR
 486        fp->tpa_queue_used |= (1 << queue);
 487        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
 488           fp->tpa_queue_used);
 489#endif
 490}
 491
 492/* Timestamp option length allowed for TPA aggregation:
 493 *
 494 *              nop nop kind length echo val
 495 */
 496#define TPA_TSTAMP_OPT_LEN      12
 497/**
 498 * bnx2x_set_gro_params - compute GRO values
 499 *
 500 * @skb:                packet skb
 501 * @parsing_flags:      parsing flags from the START CQE
 502 * @len_on_bd:          total length of the first packet for the
 503 *                      aggregation.
  504 * @pkt_len:            length of all segments
  505 * @num_of_coalesced_segs: number of coalesced segments
  506 *
  507 * Approximates the MSS for this aggregation using its first packet.
  508 * Also computes the number of aggregated segments and the gso_type.
  509 */
 510static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 511                                 u16 len_on_bd, unsigned int pkt_len,
 512                                 u16 num_of_coalesced_segs)
 513{
 514        /* TPA aggregation won't have either IP options or TCP options
 515         * other than timestamp or IPv6 extension headers.
 516         */
 517        u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
 518
 519        if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
 520            PRS_FLAG_OVERETH_IPV6) {
 521                hdrs_len += sizeof(struct ipv6hdr);
 522                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 523        } else {
 524                hdrs_len += sizeof(struct iphdr);
 525                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 526        }
 527
  528        /* Check if there was a TCP timestamp; if there is, it will
  529         * always be 12 bytes long: nop nop kind length echo val.
 530         *
 531         * Otherwise FW would close the aggregation.
 532         */
 533        if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 534                hdrs_len += TPA_TSTAMP_OPT_LEN;
 535
 536        skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
 537
 538        /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
 539         * to skb_shinfo(skb)->gso_segs
 540         */
 541        NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
 542}
 543
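/* Worked example of the gso_size computation in bnx2x_set_gro_params()
 * above, assuming a standard 1500-byte MTU and an IPv4 aggregation with
 * TCP timestamps:
 *
 *   hdrs_len = ETH_HLEN (14) + sizeof(struct tcphdr) (20) +
 *              sizeof(struct iphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66
 *
 * so a 1514-byte first frame (len_on_bd) gives
 * gso_size = 1514 - 66 = 1448, which matches the usual TCP MSS when
 * timestamps are negotiated (1460 - 12).
 */
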
 544static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 545                              u16 index, gfp_t gfp_mask)
 546{
 547        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
 548        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
 549        struct bnx2x_alloc_pool *pool = &fp->page_pool;
 550        dma_addr_t mapping;
 551
 552        if (!pool->page) {
 553                pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
 554                if (unlikely(!pool->page))
 555                        return -ENOMEM;
 556
 557                pool->offset = 0;
 558        }
 559
 560        mapping = dma_map_page(&bp->pdev->dev, pool->page,
 561                               pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 562        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 563                BNX2X_ERR("Can't map sge\n");
 564                return -ENOMEM;
 565        }
 566
 567        sw_buf->page = pool->page;
 568        sw_buf->offset = pool->offset;
 569
 570        dma_unmap_addr_set(sw_buf, mapping, mapping);
 571
 572        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 573        sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 574
 575        pool->offset += SGE_PAGE_SIZE;
 576        if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
 577                get_page(pool->page);
 578        else
 579                pool->page = NULL;
 580        return 0;
 581}
 582
 583static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 584                               struct bnx2x_agg_info *tpa_info,
 585                               u16 pages,
 586                               struct sk_buff *skb,
 587                               struct eth_end_agg_rx_cqe *cqe,
 588                               u16 cqe_idx)
 589{
 590        struct sw_rx_page *rx_pg, old_rx_pg;
 591        u32 i, frag_len, frag_size;
 592        int err, j, frag_id = 0;
 593        u16 len_on_bd = tpa_info->len_on_bd;
 594        u16 full_page = 0, gro_size = 0;
 595
 596        frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 597
 598        if (fp->mode == TPA_MODE_GRO) {
 599                gro_size = tpa_info->gro_size;
 600                full_page = tpa_info->full_page;
 601        }
 602
 603        /* This is needed in order to enable forwarding support */
 604        if (frag_size)
 605                bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
 606                                     le16_to_cpu(cqe->pkt_len),
 607                                     le16_to_cpu(cqe->num_of_coalesced_segs));
 608
 609#ifdef BNX2X_STOP_ON_ERROR
 610        if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
 611                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 612                          pages, cqe_idx);
 613                BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 614                bnx2x_panic();
 615                return -EINVAL;
 616        }
 617#endif
 618
 619        /* Run through the SGL and compose the fragmented skb */
 620        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
 621                u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 622
 623                /* FW gives the indices of the SGE as if the ring is an array
 624                   (meaning that "next" element will consume 2 indices) */
 625                if (fp->mode == TPA_MODE_GRO)
 626                        frag_len = min_t(u32, frag_size, (u32)full_page);
 627                else /* LRO */
 628                        frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
 629
 630                rx_pg = &fp->rx_page_ring[sge_idx];
 631                old_rx_pg = *rx_pg;
 632
 633                /* If we fail to allocate a substitute page, we simply stop
 634                   where we are and drop the whole packet */
 635                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
 636                if (unlikely(err)) {
 637                        bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 638                        return err;
 639                }
 640
 641                dma_unmap_page(&bp->pdev->dev,
 642                               dma_unmap_addr(&old_rx_pg, mapping),
 643                               SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 644                /* Add one frag and update the appropriate fields in the skb */
 645                if (fp->mode == TPA_MODE_LRO)
 646                        skb_fill_page_desc(skb, j, old_rx_pg.page,
 647                                           old_rx_pg.offset, frag_len);
 648                else { /* GRO */
 649                        int rem;
 650                        int offset = 0;
 651                        for (rem = frag_len; rem > 0; rem -= gro_size) {
 652                                int len = rem > gro_size ? gro_size : rem;
 653                                skb_fill_page_desc(skb, frag_id++,
 654                                                   old_rx_pg.page,
 655                                                   old_rx_pg.offset + offset,
 656                                                   len);
 657                                if (offset)
 658                                        get_page(old_rx_pg.page);
 659                                offset += len;
 660                        }
 661                }
 662
 663                skb->data_len += frag_len;
 664                skb->truesize += SGE_PAGES;
 665                skb->len += frag_len;
 666
 667                frag_size -= frag_len;
 668        }
 669
 670        return 0;
 671}
 672
 673static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 674{
 675        if (fp->rx_frag_size)
 676                skb_free_frag(data);
 677        else
 678                kfree(data);
 679}
 680
 681static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
 682{
 683        if (fp->rx_frag_size) {
 684                /* GFP_KERNEL allocations are used only during initialization */
 685                if (unlikely(gfpflags_allow_blocking(gfp_mask)))
 686                        return (void *)__get_free_page(gfp_mask);
 687
 688                return netdev_alloc_frag(fp->rx_frag_size);
 689        }
 690
 691        return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
 692}
 693
 694#ifdef CONFIG_INET
 695static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
 696{
 697        const struct iphdr *iph = ip_hdr(skb);
 698        struct tcphdr *th;
 699
 700        skb_set_transport_header(skb, sizeof(struct iphdr));
 701        th = tcp_hdr(skb);
 702
 703        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
 704                                  iph->saddr, iph->daddr, 0);
 705}
 706
 707static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
 708{
 709        struct ipv6hdr *iph = ipv6_hdr(skb);
 710        struct tcphdr *th;
 711
 712        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 713        th = tcp_hdr(skb);
 714
 715        th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 716                                  &iph->saddr, &iph->daddr, 0);
 717}
 718
 719static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
 720                            void (*gro_func)(struct bnx2x*, struct sk_buff*))
 721{
 722        skb_reset_network_header(skb);
 723        gro_func(bp, skb);
 724        tcp_gro_complete(skb);
 725}
 726#endif
 727
 728static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 729                               struct sk_buff *skb)
 730{
 731#ifdef CONFIG_INET
 732        if (skb_shinfo(skb)->gso_size) {
 733                switch (be16_to_cpu(skb->protocol)) {
 734                case ETH_P_IP:
 735                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
 736                        break;
 737                case ETH_P_IPV6:
 738                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
 739                        break;
 740                default:
 741                        netdev_WARN_ONCE(bp->dev,
 742                                         "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
 743                                         be16_to_cpu(skb->protocol));
 744                }
 745        }
 746#endif
 747        skb_record_rx_queue(skb, fp->rx_queue);
 748        napi_gro_receive(&fp->napi, skb);
 749}
 750
 751static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 752                           struct bnx2x_agg_info *tpa_info,
 753                           u16 pages,
 754                           struct eth_end_agg_rx_cqe *cqe,
 755                           u16 cqe_idx)
 756{
 757        struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 758        u8 pad = tpa_info->placement_offset;
 759        u16 len = tpa_info->len_on_bd;
 760        struct sk_buff *skb = NULL;
 761        u8 *new_data, *data = rx_buf->data;
 762        u8 old_tpa_state = tpa_info->tpa_state;
 763
 764        tpa_info->tpa_state = BNX2X_TPA_STOP;
 765
  766        /* If there was an error during the handling of the TPA_START -
 767         * drop this aggregation.
 768         */
 769        if (old_tpa_state == BNX2X_TPA_ERROR)
 770                goto drop;
 771
 772        /* Try to allocate the new data */
 773        new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
 774        /* Unmap skb in the pool anyway, as we are going to change
 775           pool entry status to BNX2X_TPA_STOP even if new skb allocation
 776           fails. */
 777        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 778                         fp->rx_buf_size, DMA_FROM_DEVICE);
 779        if (likely(new_data))
 780                skb = build_skb(data, fp->rx_frag_size);
 781
 782        if (likely(skb)) {
 783#ifdef BNX2X_STOP_ON_ERROR
 784                if (pad + len > fp->rx_buf_size) {
 785                        BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
 786                                  pad, len, fp->rx_buf_size);
 787                        bnx2x_panic();
 788                        return;
 789                }
 790#endif
 791
 792                skb_reserve(skb, pad + NET_SKB_PAD);
 793                skb_put(skb, len);
 794                skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
 795
 796                skb->protocol = eth_type_trans(skb, bp->dev);
 797                skb->ip_summed = CHECKSUM_UNNECESSARY;
 798
 799                if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
 800                                         skb, cqe, cqe_idx)) {
 801                        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 802                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
 803                        bnx2x_gro_receive(bp, fp, skb);
 804                } else {
 805                        DP(NETIF_MSG_RX_STATUS,
 806                           "Failed to allocate new pages - dropping packet!\n");
 807                        dev_kfree_skb_any(skb);
 808                }
 809
 810                /* put new data in bin */
 811                rx_buf->data = new_data;
 812
 813                return;
 814        }
 815        if (new_data)
 816                bnx2x_frag_free(fp, new_data);
 817drop:
 818        /* drop the packet and keep the buffer in the bin */
 819        DP(NETIF_MSG_RX_STATUS,
 820           "Failed to allocate or map a new skb - dropping packet!\n");
 821        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
 822}
 823
 824static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 825                               u16 index, gfp_t gfp_mask)
 826{
 827        u8 *data;
 828        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
 829        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 830        dma_addr_t mapping;
 831
 832        data = bnx2x_frag_alloc(fp, gfp_mask);
 833        if (unlikely(data == NULL))
 834                return -ENOMEM;
 835
 836        mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
 837                                 fp->rx_buf_size,
 838                                 DMA_FROM_DEVICE);
 839        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 840                bnx2x_frag_free(fp, data);
 841                BNX2X_ERR("Can't map rx data\n");
 842                return -ENOMEM;
 843        }
 844
 845        rx_buf->data = data;
 846        dma_unmap_addr_set(rx_buf, mapping, mapping);
 847
 848        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 849        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 850
 851        return 0;
 852}
 853
 854static
 855void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
 856                                 struct bnx2x_fastpath *fp,
 857                                 struct bnx2x_eth_q_stats *qstats)
 858{
 859        /* Do nothing if no L4 csum validation was done.
 860         * We do not check whether IP csum was validated. For IPv4 we assume
 861         * that if the card got as far as validating the L4 csum, it also
 862         * validated the IP csum. IPv6 has no IP csum.
 863         */
 864        if (cqe->fast_path_cqe.status_flags &
 865            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
 866                return;
 867
 868        /* If L4 validation was done, check if an error was found. */
 869
 870        if (cqe->fast_path_cqe.type_error_flags &
 871            (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
 872             ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
 873                qstats->hw_csum_err++;
 874        else
 875                skb->ip_summed = CHECKSUM_UNNECESSARY;
 876}
 877
 878static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 879{
 880        struct bnx2x *bp = fp->bp;
 881        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 882        u16 sw_comp_cons, sw_comp_prod;
 883        int rx_pkt = 0;
 884        union eth_rx_cqe *cqe;
 885        struct eth_fast_path_rx_cqe *cqe_fp;
 886
 887#ifdef BNX2X_STOP_ON_ERROR
 888        if (unlikely(bp->panic))
 889                return 0;
 890#endif
 891        if (budget <= 0)
 892                return rx_pkt;
 893
 894        bd_cons = fp->rx_bd_cons;
 895        bd_prod = fp->rx_bd_prod;
 896        bd_prod_fw = bd_prod;
 897        sw_comp_cons = fp->rx_comp_cons;
 898        sw_comp_prod = fp->rx_comp_prod;
 899
 900        comp_ring_cons = RCQ_BD(sw_comp_cons);
 901        cqe = &fp->rx_comp_ring[comp_ring_cons];
 902        cqe_fp = &cqe->fast_path_cqe;
 903
 904        DP(NETIF_MSG_RX_STATUS,
 905           "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
 906
 907        while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
 908                struct sw_rx_bd *rx_buf = NULL;
 909                struct sk_buff *skb;
 910                u8 cqe_fp_flags;
 911                enum eth_rx_cqe_type cqe_fp_type;
 912                u16 len, pad, queue;
 913                u8 *data;
 914                u32 rxhash;
 915                enum pkt_hash_types rxhash_type;
 916
 917#ifdef BNX2X_STOP_ON_ERROR
 918                if (unlikely(bp->panic))
 919                        return 0;
 920#endif
 921
 922                bd_prod = RX_BD(bd_prod);
 923                bd_cons = RX_BD(bd_cons);
 924
 925                /* A rmb() is required to ensure that the CQE is not read
 926                 * before it is written by the adapter DMA.  PCI ordering
 927                 * rules will make sure the other fields are written before
 928                 * the marker at the end of struct eth_fast_path_rx_cqe
 929                 * but without rmb() a weakly ordered processor can process
  930                 * stale data.  Without the barrier the TPA state machine might
  931                 * enter an inconsistent state and the kernel stack might be
  932                 * provided with an incorrect packet description - these can
  933                 * lead to various kernel crashes.
 934                 */
 935                rmb();
 936
 937                cqe_fp_flags = cqe_fp->type_error_flags;
 938                cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 939
 940                DP(NETIF_MSG_RX_STATUS,
 941                   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
 942                   CQE_TYPE(cqe_fp_flags),
 943                   cqe_fp_flags, cqe_fp->status_flags,
 944                   le32_to_cpu(cqe_fp->rss_hash_result),
 945                   le16_to_cpu(cqe_fp->vlan_tag),
 946                   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
 947
 948                /* is this a slowpath msg? */
 949                if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 950                        bnx2x_sp_event(fp, cqe);
 951                        goto next_cqe;
 952                }
 953
 954                rx_buf = &fp->rx_buf_ring[bd_cons];
 955                data = rx_buf->data;
 956
 957                if (!CQE_TYPE_FAST(cqe_fp_type)) {
 958                        struct bnx2x_agg_info *tpa_info;
 959                        u16 frag_size, pages;
 960#ifdef BNX2X_STOP_ON_ERROR
 961                        /* sanity check */
 962                        if (fp->mode == TPA_MODE_DISABLED &&
 963                            (CQE_TYPE_START(cqe_fp_type) ||
 964                             CQE_TYPE_STOP(cqe_fp_type)))
 965                                BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
 966                                          CQE_TYPE(cqe_fp_type));
 967#endif
 968
 969                        if (CQE_TYPE_START(cqe_fp_type)) {
 970                                u16 queue = cqe_fp->queue_index;
 971                                DP(NETIF_MSG_RX_STATUS,
 972                                   "calling tpa_start on queue %d\n",
 973                                   queue);
 974
 975                                bnx2x_tpa_start(fp, queue,
 976                                                bd_cons, bd_prod,
 977                                                cqe_fp);
 978
 979                                goto next_rx;
 980                        }
 981                        queue = cqe->end_agg_cqe.queue_index;
 982                        tpa_info = &fp->tpa_info[queue];
 983                        DP(NETIF_MSG_RX_STATUS,
 984                           "calling tpa_stop on queue %d\n",
 985                           queue);
 986
 987                        frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
 988                                    tpa_info->len_on_bd;
 989
 990                        if (fp->mode == TPA_MODE_GRO)
 991                                pages = (frag_size + tpa_info->full_page - 1) /
 992                                         tpa_info->full_page;
 993                        else
 994                                pages = SGE_PAGE_ALIGN(frag_size) >>
 995                                        SGE_PAGE_SHIFT;
 996
 997                        bnx2x_tpa_stop(bp, fp, tpa_info, pages,
 998                                       &cqe->end_agg_cqe, comp_ring_cons);
 999#ifdef BNX2X_STOP_ON_ERROR
1000                        if (bp->panic)
1001                                return 0;
1002#endif
1003
1004                        bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1005                        goto next_cqe;
1006                }
1007                /* non TPA */
1008                len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1009                pad = cqe_fp->placement_offset;
1010                dma_sync_single_for_cpu(&bp->pdev->dev,
1011                                        dma_unmap_addr(rx_buf, mapping),
1012                                        pad + RX_COPY_THRESH,
1013                                        DMA_FROM_DEVICE);
1014                pad += NET_SKB_PAD;
 1015                prefetch(data + pad); /* speed up eth_type_trans() */
1016                /* is this an error packet? */
1017                if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1018                        DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1019                           "ERROR  flags %x  rx packet %u\n",
1020                           cqe_fp_flags, sw_comp_cons);
1021                        bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1022                        goto reuse_rx;
1023                }
1024
1025                /* Since we don't have a jumbo ring
1026                 * copy small packets if mtu > 1500
1027                 */
1028                if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1029                    (len <= RX_COPY_THRESH)) {
1030                        skb = napi_alloc_skb(&fp->napi, len);
1031                        if (skb == NULL) {
1032                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1033                                   "ERROR  packet dropped because of alloc failure\n");
1034                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1035                                goto reuse_rx;
1036                        }
1037                        memcpy(skb->data, data + pad, len);
1038                        bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1039                } else {
1040                        if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1041                                                       GFP_ATOMIC) == 0)) {
1042                                dma_unmap_single(&bp->pdev->dev,
1043                                                 dma_unmap_addr(rx_buf, mapping),
1044                                                 fp->rx_buf_size,
1045                                                 DMA_FROM_DEVICE);
1046                                skb = build_skb(data, fp->rx_frag_size);
1047                                if (unlikely(!skb)) {
1048                                        bnx2x_frag_free(fp, data);
1049                                        bnx2x_fp_qstats(bp, fp)->
1050                                                        rx_skb_alloc_failed++;
1051                                        goto next_rx;
1052                                }
1053                                skb_reserve(skb, pad);
1054                        } else {
1055                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1056                                   "ERROR  packet dropped because of alloc failure\n");
1057                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1058reuse_rx:
1059                                bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1060                                goto next_rx;
1061                        }
1062                }
1063
1064                skb_put(skb, len);
1065                skb->protocol = eth_type_trans(skb, bp->dev);
1066
 1067                /* Set Toeplitz hash for a non-LRO skb */
1068                rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1069                skb_set_hash(skb, rxhash, rxhash_type);
1070
1071                skb_checksum_none_assert(skb);
1072
1073                if (bp->dev->features & NETIF_F_RXCSUM)
1074                        bnx2x_csum_validate(skb, cqe, fp,
1075                                            bnx2x_fp_qstats(bp, fp));
1076
1077                skb_record_rx_queue(skb, fp->rx_queue);
1078
1079                /* Check if this packet was timestamped */
1080                if (unlikely(cqe->fast_path_cqe.type_error_flags &
1081                             (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1082                        bnx2x_set_rx_ts(bp, skb);
1083
1084                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1085                    PARSING_FLAGS_VLAN)
1086                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1087                                               le16_to_cpu(cqe_fp->vlan_tag));
1088
1089                napi_gro_receive(&fp->napi, skb);
1090next_rx:
1091                rx_buf->data = NULL;
1092
1093                bd_cons = NEXT_RX_IDX(bd_cons);
1094                bd_prod = NEXT_RX_IDX(bd_prod);
1095                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1096                rx_pkt++;
1097next_cqe:
1098                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1099                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1100
1101                /* mark CQE as free */
1102                BNX2X_SEED_CQE(cqe_fp);
1103
1104                if (rx_pkt == budget)
1105                        break;
1106
1107                comp_ring_cons = RCQ_BD(sw_comp_cons);
1108                cqe = &fp->rx_comp_ring[comp_ring_cons];
1109                cqe_fp = &cqe->fast_path_cqe;
1110        } /* while */
1111
1112        fp->rx_bd_cons = bd_cons;
1113        fp->rx_bd_prod = bd_prod_fw;
1114        fp->rx_comp_cons = sw_comp_cons;
1115        fp->rx_comp_prod = sw_comp_prod;
1116
1117        /* Update producers */
1118        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1119                             fp->rx_sge_prod);
1120
1121        return rx_pkt;
1122}
1123
1124static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1125{
1126        struct bnx2x_fastpath *fp = fp_cookie;
1127        struct bnx2x *bp = fp->bp;
1128        u8 cos;
1129
1130        DP(NETIF_MSG_INTR,
1131           "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1132           fp->index, fp->fw_sb_id, fp->igu_sb_id);
1133
1134        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1135
1136#ifdef BNX2X_STOP_ON_ERROR
1137        if (unlikely(bp->panic))
1138                return IRQ_HANDLED;
1139#endif
1140
1141        /* Handle Rx and Tx according to MSI-X vector */
1142        for_each_cos_in_tx_queue(fp, cos)
1143                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1144
1145        prefetch(&fp->sb_running_index[SM_RX_ID]);
1146        napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1147
1148        return IRQ_HANDLED;
1149}
1150
1151/* HW Lock for shared dual port PHYs */
1152void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1153{
1154        mutex_lock(&bp->port.phy_mutex);
1155
1156        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1157}
1158
1159void bnx2x_release_phy_lock(struct bnx2x *bp)
1160{
1161        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1162
1163        mutex_unlock(&bp->port.phy_mutex);
1164}
1165
 1166/* calculates MF speed according to current line speed and MF configuration */
1167u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1168{
1169        u16 line_speed = bp->link_vars.line_speed;
1170        if (IS_MF(bp)) {
1171                u16 maxCfg = bnx2x_extract_max_cfg(bp,
1172                                                   bp->mf_config[BP_VN(bp)]);
1173
1174                /* Calculate the current MAX line speed limit for the MF
1175                 * devices
1176                 */
1177                if (IS_MF_PERCENT_BW(bp))
1178                        line_speed = (line_speed * maxCfg) / 100;
1179                else { /* SD mode */
1180                        u16 vn_max_rate = maxCfg * 100;
1181
1182                        if (vn_max_rate < line_speed)
1183                                line_speed = vn_max_rate;
1184                }
1185        }
1186
1187        return line_speed;
1188}
1189
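/* Worked example of the MF speed calculation in bnx2x_get_mf_speed() above,
 * assuming a 10000 Mbps link and a hypothetical maxCfg of 25: in percent-BW
 * mode the reported speed is 10000 * 25 / 100 = 2500 Mbps, while in SD mode
 * maxCfg is taken in units of 100 Mbps, so vn_max_rate is 2500 Mbps and
 * likewise caps the reported speed at 2500 Mbps.
 */
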
1190/**
1191 * bnx2x_fill_report_data - fill link report data to report
1192 *
1193 * @bp:         driver handle
1194 * @data:       link state to update
1195 *
 1196 * It uses non-atomic bit operations because it is called under the mutex.
1197 */
1198static void bnx2x_fill_report_data(struct bnx2x *bp,
1199                                   struct bnx2x_link_report_data *data)
1200{
1201        memset(data, 0, sizeof(*data));
1202
1203        if (IS_PF(bp)) {
1204                /* Fill the report data: effective line speed */
1205                data->line_speed = bnx2x_get_mf_speed(bp);
1206
1207                /* Link is down */
1208                if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1209                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1210                                  &data->link_report_flags);
1211
1212                if (!BNX2X_NUM_ETH_QUEUES(bp))
1213                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1214                                  &data->link_report_flags);
1215
1216                /* Full DUPLEX */
1217                if (bp->link_vars.duplex == DUPLEX_FULL)
1218                        __set_bit(BNX2X_LINK_REPORT_FD,
1219                                  &data->link_report_flags);
1220
1221                /* Rx Flow Control is ON */
1222                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1223                        __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1224                                  &data->link_report_flags);
1225
1226                /* Tx Flow Control is ON */
1227                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1228                        __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1229                                  &data->link_report_flags);
1230        } else { /* VF */
1231                *data = bp->vf_link_vars;
1232        }
1233}
1234
1235/**
1236 * bnx2x_link_report - report link status to OS.
1237 *
1238 * @bp:         driver handle
1239 *
 1240 * Calls __bnx2x_link_report() under the same locking scheme
 1241 * as the link/PHY state managing code to ensure consistent link
 1242 * reporting.
1243 */
1244
1245void bnx2x_link_report(struct bnx2x *bp)
1246{
1247        bnx2x_acquire_phy_lock(bp);
1248        __bnx2x_link_report(bp);
1249        bnx2x_release_phy_lock(bp);
1250}
1251
1252/**
1253 * __bnx2x_link_report - report link status to OS.
1254 *
1255 * @bp:         driver handle
1256 *
 1257 * Non-atomic implementation.
1258 * Should be called under the phy_lock.
1259 */
1260void __bnx2x_link_report(struct bnx2x *bp)
1261{
1262        struct bnx2x_link_report_data cur_data;
1263
1264        if (bp->force_link_down) {
1265                bp->link_vars.link_up = 0;
1266                return;
1267        }
1268
1269        /* reread mf_cfg */
1270        if (IS_PF(bp) && !CHIP_IS_E1(bp))
1271                bnx2x_read_mf_cfg(bp);
1272
1273        /* Read the current link report info */
1274        bnx2x_fill_report_data(bp, &cur_data);
1275
1276        /* Don't report link down or exactly the same link status twice */
1277        if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1278            (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1279                      &bp->last_reported_link.link_report_flags) &&
1280             test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1281                      &cur_data.link_report_flags)))
1282                return;
1283
1284        bp->link_cnt++;
1285
 1286        /* We are going to report new link parameters now -
1287         * remember the current data for the next time.
1288         */
1289        memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1290
1291        /* propagate status to VFs */
1292        if (IS_PF(bp))
1293                bnx2x_iov_link_update(bp);
1294
1295        if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1296                     &cur_data.link_report_flags)) {
1297                netif_carrier_off(bp->dev);
1298                netdev_err(bp->dev, "NIC Link is Down\n");
1299                return;
1300        } else {
1301                const char *duplex;
1302                const char *flow;
1303
1304                netif_carrier_on(bp->dev);
1305
1306                if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1307                                       &cur_data.link_report_flags))
1308                        duplex = "full";
1309                else
1310                        duplex = "half";
1311
 1312                /* Handle the FC at the end so that only these flags can
 1313                 * possibly be set. This way we can easily check whether any
 1314                 * FC is enabled.
1315                 */
1316                if (cur_data.link_report_flags) {
1317                        if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1318                                     &cur_data.link_report_flags)) {
1319                                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1320                                     &cur_data.link_report_flags))
1321                                        flow = "ON - receive & transmit";
1322                                else
1323                                        flow = "ON - receive";
1324                        } else {
1325                                flow = "ON - transmit";
1326                        }
1327                } else {
1328                        flow = "none";
1329                }
1330                netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1331                            cur_data.line_speed, duplex, flow);
1332        }
1333}
1334
1335static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1336{
1337        int i;
1338
1339        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1340                struct eth_rx_sge *sge;
1341
1342                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1343                sge->addr_hi =
1344                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1345                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1346
1347                sge->addr_lo =
1348                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1349                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1350        }
1351}
1352
1353static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1354                                struct bnx2x_fastpath *fp, int last)
1355{
1356        int i;
1357
1358        for (i = 0; i < last; i++) {
1359                struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1360                struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1361                u8 *data = first_buf->data;
1362
1363                if (data == NULL) {
1364                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1365                        continue;
1366                }
1367                if (tpa_info->tpa_state == BNX2X_TPA_START)
1368                        dma_unmap_single(&bp->pdev->dev,
1369                                         dma_unmap_addr(first_buf, mapping),
1370                                         fp->rx_buf_size, DMA_FROM_DEVICE);
1371                bnx2x_frag_free(fp, data);
1372                first_buf->data = NULL;
1373        }
1374}
1375
1376void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1377{
1378        int j;
1379
1380        for_each_rx_queue_cnic(bp, j) {
1381                struct bnx2x_fastpath *fp = &bp->fp[j];
1382
1383                fp->rx_bd_cons = 0;
1384
1385                /* Activate BD ring */
1386                /* Warning!
1387                 * This will generate an interrupt (to the TSTORM) and
1388                 * must only be done after the chip has been initialized.
1389                 */
1390                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1391                                     fp->rx_sge_prod);
1392        }
1393}
1394
1395void bnx2x_init_rx_rings(struct bnx2x *bp)
1396{
1397        int func = BP_FUNC(bp);
1398        u16 ring_prod;
1399        int i, j;
1400
1401        /* Allocate TPA resources */
1402        for_each_eth_queue(bp, j) {
1403                struct bnx2x_fastpath *fp = &bp->fp[j];
1404
1405                DP(NETIF_MSG_IFUP,
1406                   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1407
1408                if (fp->mode != TPA_MODE_DISABLED) {
1409                        /* Fill the per-aggregation pool */
1410                        for (i = 0; i < MAX_AGG_QS(bp); i++) {
1411                                struct bnx2x_agg_info *tpa_info =
1412                                        &fp->tpa_info[i];
1413                                struct sw_rx_bd *first_buf =
1414                                        &tpa_info->first_buf;
1415
1416                                first_buf->data =
1417                                        bnx2x_frag_alloc(fp, GFP_KERNEL);
1418                                if (!first_buf->data) {
1419                                        BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1420                                                  j);
1421                                        bnx2x_free_tpa_pool(bp, fp, i);
1422                                        fp->mode = TPA_MODE_DISABLED;
1423                                        break;
1424                                }
1425                                dma_unmap_addr_set(first_buf, mapping, 0);
1426                                tpa_info->tpa_state = BNX2X_TPA_STOP;
1427                        }
1428
1429                        /* "next page" elements initialization */
1430                        bnx2x_set_next_page_sgl(fp);
1431
1432                        /* set SGEs bit mask */
1433                        bnx2x_init_sge_ring_bit_mask(fp);
1434
1435                        /* Allocate SGEs and initialize the ring elements */
1436                        for (i = 0, ring_prod = 0;
1437                             i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1438
1439                                if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1440                                                       GFP_KERNEL) < 0) {
1441                                        BNX2X_ERR("was only able to allocate %d rx sges\n",
1442                                                  i);
1443                                        BNX2X_ERR("disabling TPA for queue[%d]\n",
1444                                                  j);
1445                                        /* Cleanup already allocated elements */
1446                                        bnx2x_free_rx_sge_range(bp, fp,
1447                                                                ring_prod);
1448                                        bnx2x_free_tpa_pool(bp, fp,
1449                                                            MAX_AGG_QS(bp));
1450                                        fp->mode = TPA_MODE_DISABLED;
1451                                        ring_prod = 0;
1452                                        break;
1453                                }
1454                                ring_prod = NEXT_SGE_IDX(ring_prod);
1455                        }
1456
1457                        fp->rx_sge_prod = ring_prod;
1458                }
1459        }
1460
1461        for_each_eth_queue(bp, j) {
1462                struct bnx2x_fastpath *fp = &bp->fp[j];
1463
1464                fp->rx_bd_cons = 0;
1465
1466                /* Activate BD ring */
1467                /* Warning!
1468                 * This will generate an interrupt (to the TSTORM) and
1469                 * must only be done after the chip has been initialized.
1470                 */
1471                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1472                                     fp->rx_sge_prod);
1473
1474                if (j != 0)
1475                        continue;
1476
1477                if (CHIP_IS_E1(bp)) {
1478                        REG_WR(bp, BAR_USTRORM_INTMEM +
1479                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1480                               U64_LO(fp->rx_comp_mapping));
1481                        REG_WR(bp, BAR_USTRORM_INTMEM +
1482                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1483                               U64_HI(fp->rx_comp_mapping));
1484                }
1485        }
1486}
1487
1488static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1489{
1490        u8 cos;
1491        struct bnx2x *bp = fp->bp;
1492
1493        for_each_cos_in_tx_queue(fp, cos) {
1494                struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1495                unsigned pkts_compl = 0, bytes_compl = 0;
1496
1497                u16 sw_prod = txdata->tx_pkt_prod;
1498                u16 sw_cons = txdata->tx_pkt_cons;
1499
1500                while (sw_cons != sw_prod) {
1501                        bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1502                                          &pkts_compl, &bytes_compl);
1503                        sw_cons++;
1504                }
1505
1506                netdev_tx_reset_queue(
1507                        netdev_get_tx_queue(bp->dev,
1508                                            txdata->txq_index));
1509        }
1510}
1511
1512static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1513{
1514        int i;
1515
1516        for_each_tx_queue_cnic(bp, i) {
1517                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1518        }
1519}
1520
1521static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1522{
1523        int i;
1524
1525        for_each_eth_queue(bp, i) {
1526                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1527        }
1528}
1529
1530static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1531{
1532        struct bnx2x *bp = fp->bp;
1533        int i;
1534
1535        /* ring wasn't allocated */
1536        if (fp->rx_buf_ring == NULL)
1537                return;
1538
1539        for (i = 0; i < NUM_RX_BD; i++) {
1540                struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1541                u8 *data = rx_buf->data;
1542
1543                if (data == NULL)
1544                        continue;
1545                dma_unmap_single(&bp->pdev->dev,
1546                                 dma_unmap_addr(rx_buf, mapping),
1547                                 fp->rx_buf_size, DMA_FROM_DEVICE);
1548
1549                rx_buf->data = NULL;
1550                bnx2x_frag_free(fp, data);
1551        }
1552}
1553
1554static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1555{
1556        int j;
1557
1558        for_each_rx_queue_cnic(bp, j) {
1559                bnx2x_free_rx_bds(&bp->fp[j]);
1560        }
1561}
1562
1563static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1564{
1565        int j;
1566
1567        for_each_eth_queue(bp, j) {
1568                struct bnx2x_fastpath *fp = &bp->fp[j];
1569
1570                bnx2x_free_rx_bds(fp);
1571
1572                if (fp->mode != TPA_MODE_DISABLED)
1573                        bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1574        }
1575}
1576
1577static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1578{
1579        bnx2x_free_tx_skbs_cnic(bp);
1580        bnx2x_free_rx_skbs_cnic(bp);
1581}
1582
1583void bnx2x_free_skbs(struct bnx2x *bp)
1584{
1585        bnx2x_free_tx_skbs(bp);
1586        bnx2x_free_rx_skbs(bp);
1587}
1588
1589void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1590{
1591        /* load old values */
1592        u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1593
1594        if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1595                /* leave all but MAX value */
1596                mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1597
1598                /* set new MAX value */
1599                mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1600                                & FUNC_MF_CFG_MAX_BW_MASK;
1601
1602                bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1603        }
1604}
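
/* Illustrative example (field position assumed; FUNC_MF_CFG_MAX_BW_MASK and
 * FUNC_MF_CFG_MAX_BW_SHIFT hold the authoritative layout): if the MAX_BW
 * field sits in bits 23:16 of mf_cfg, bnx2x_update_max_mf_config(bp, 50)
 * clears those bits, ORs in (50 << 16) and hands the updated word to the
 * MCP via the DRV_MSG_CODE_SET_MF_BW mailbox command.
 */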
1605
1606/**
1607 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1608 *
1609 * @bp:         driver handle
1610 * @nvecs:      number of vectors to be released
1611 */
1612static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1613{
1614        int i, offset = 0;
1615
1616        if (nvecs == offset)
1617                return;
1618
1619        /* VFs don't have a default SB */
1620        if (IS_PF(bp)) {
1621                free_irq(bp->msix_table[offset].vector, bp->dev);
1622                DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1623                   bp->msix_table[offset].vector);
1624                offset++;
1625        }
1626
1627        if (CNIC_SUPPORT(bp)) {
1628                if (nvecs == offset)
1629                        return;
1630                offset++;
1631        }
1632
1633        for_each_eth_queue(bp, i) {
1634                if (nvecs == offset)
1635                        return;
1636                DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1637                   i, bp->msix_table[offset].vector);
1638
1639                free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1640        }
1641}
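
/* The vector layout walked by bnx2x_free_msix_irqs() matches the one built
 * in bnx2x_enable_msix() below:
 *   entry 0      - default/slowpath status block (PF only)
 *   next entry   - CNIC (if supported); only skipped here, since its IRQ
 *                  is not requested by this driver
 *   remaining    - one vector per ETH fastpath queue
 */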
1642
1643void bnx2x_free_irq(struct bnx2x *bp)
1644{
1645        if (bp->flags & USING_MSIX_FLAG &&
1646            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1647                int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1648
1649                /* vfs don't have a default status block */
1650                if (IS_PF(bp))
1651                        nvecs++;
1652
1653                bnx2x_free_msix_irqs(bp, nvecs);
1654        } else {
1655                free_irq(bp->dev->irq, bp->dev);
1656        }
1657}
1658
1659int bnx2x_enable_msix(struct bnx2x *bp)
1660{
1661        int msix_vec = 0, i, rc;
1662
1663        /* VFs don't have a default status block */
1664        if (IS_PF(bp)) {
1665                bp->msix_table[msix_vec].entry = msix_vec;
1666                BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1667                               bp->msix_table[0].entry);
1668                msix_vec++;
1669        }
1670
1671        /* CNIC requires an MSI-X vector for itself */
1672        if (CNIC_SUPPORT(bp)) {
1673                bp->msix_table[msix_vec].entry = msix_vec;
1674                BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1675                               msix_vec, bp->msix_table[msix_vec].entry);
1676                msix_vec++;
1677        }
1678
1679        /* We need separate vectors for ETH queues only (not FCoE) */
1680        for_each_eth_queue(bp, i) {
1681                bp->msix_table[msix_vec].entry = msix_vec;
1682                BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1683                               msix_vec, msix_vec, i);
1684                msix_vec++;
1685        }
1686
1687        DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1688           msix_vec);
1689
1690        rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1691                                   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1692        /*
1693         * reconfigure number of tx/rx queues according to available
1694         * MSI-X vectors
1695         */
1696        if (rc == -ENOSPC) {
1697                /* Get by with single vector */
1698                rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1699                if (rc < 0) {
1700                        BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1701                                       rc);
1702                        goto no_msix;
1703                }
1704
1705                BNX2X_DEV_INFO("Using single MSI-X vector\n");
1706                bp->flags |= USING_SINGLE_MSIX_FLAG;
1707
1708                BNX2X_DEV_INFO("set number of queues to 1\n");
1709                bp->num_ethernet_queues = 1;
1710                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1711        } else if (rc < 0) {
1712                BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1713                goto no_msix;
1714        } else if (rc < msix_vec) {
1715                /* how many fewer vectors will we have? */
1716                int diff = msix_vec - rc;
1717
1718                BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1719
1720                /*
1721                 * decrease number of queues by number of unallocated entries
1722                 */
1723                bp->num_ethernet_queues -= diff;
1724                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1725
1726                BNX2X_DEV_INFO("New queue configuration set: %d\n",
1727                               bp->num_queues);
1728        }
1729
1730        bp->flags |= USING_MSIX_FLAG;
1731
1732        return 0;
1733
1734no_msix:
1735        /* fall back to INTx if there is not enough memory */
1736        if (rc == -ENOMEM)
1737                bp->flags |= DISABLE_MSI_FLAG;
1738
1739        return rc;
1740}
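
/* Example: a PF with CNIC support and 8 ETH queues requests
 * 1 (slowpath) + 1 (CNIC) + 8 = 10 vectors. If the PCI core grants only,
 * say, 7 of them, the driver keeps running and drops 10 - 7 = 3 ETH
 * queues; on -ENOSPC it retries with a single MSI-X vector, and on any
 * other failure the caller may fall back to MSI or INTx (no_msix path).
 */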
1741
1742static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1743{
1744        int i, rc, offset = 0;
1745
1746        /* no default status block for vf */
1747        if (IS_PF(bp)) {
1748                rc = request_irq(bp->msix_table[offset++].vector,
1749                                 bnx2x_msix_sp_int, 0,
1750                                 bp->dev->name, bp->dev);
1751                if (rc) {
1752                        BNX2X_ERR("request sp irq failed\n");
1753                        return -EBUSY;
1754                }
1755        }
1756
1757        if (CNIC_SUPPORT(bp))
1758                offset++;
1759
1760        for_each_eth_queue(bp, i) {
1761                struct bnx2x_fastpath *fp = &bp->fp[i];
1762                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1763                         bp->dev->name, i);
1764
1765                rc = request_irq(bp->msix_table[offset].vector,
1766                                 bnx2x_msix_fp_int, 0, fp->name, fp);
1767                if (rc) {
1768                        BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1769                              bp->msix_table[offset].vector, rc);
1770                        bnx2x_free_msix_irqs(bp, offset);
1771                        return -EBUSY;
1772                }
1773
1774                offset++;
1775        }
1776
1777        i = BNX2X_NUM_ETH_QUEUES(bp);
1778        if (IS_PF(bp)) {
1779                offset = 1 + CNIC_SUPPORT(bp);
1780                netdev_info(bp->dev,
1781                            "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1782                            bp->msix_table[0].vector,
1783                            0, bp->msix_table[offset].vector,
1784                            i - 1, bp->msix_table[offset + i - 1].vector);
1785        } else {
1786                offset = CNIC_SUPPORT(bp);
1787                netdev_info(bp->dev,
1788                            "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1789                            0, bp->msix_table[offset].vector,
1790                            i - 1, bp->msix_table[offset + i - 1].vector);
1791        }
1792        return 0;
1793}
1794
1795int bnx2x_enable_msi(struct bnx2x *bp)
1796{
1797        int rc;
1798
1799        rc = pci_enable_msi(bp->pdev);
1800        if (rc) {
1801                BNX2X_DEV_INFO("MSI is not attainable\n");
1802                return -1;
1803        }
1804        bp->flags |= USING_MSI_FLAG;
1805
1806        return 0;
1807}
1808
1809static int bnx2x_req_irq(struct bnx2x *bp)
1810{
1811        unsigned long flags;
1812        unsigned int irq;
1813
1814        if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1815                flags = 0;
1816        else
1817                flags = IRQF_SHARED;
1818
1819        if (bp->flags & USING_MSIX_FLAG)
1820                irq = bp->msix_table[0].vector;
1821        else
1822                irq = bp->pdev->irq;
1823
1824        return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1825}
1826
1827static int bnx2x_setup_irqs(struct bnx2x *bp)
1828{
1829        int rc = 0;
1830        if (bp->flags & USING_MSIX_FLAG &&
1831            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1832                rc = bnx2x_req_msix_irqs(bp);
1833                if (rc)
1834                        return rc;
1835        } else {
1836                rc = bnx2x_req_irq(bp);
1837                if (rc) {
1838                        BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1839                        return rc;
1840                }
1841                if (bp->flags & USING_MSI_FLAG) {
1842                        bp->dev->irq = bp->pdev->irq;
1843                        netdev_info(bp->dev, "using MSI IRQ %d\n",
1844                                    bp->dev->irq);
1845                }
1846                if (bp->flags & USING_MSIX_FLAG) {
1847                        bp->dev->irq = bp->msix_table[0].vector;
1848                        netdev_info(bp->dev, "using MSIX IRQ %d\n",
1849                                    bp->dev->irq);
1850                }
1851        }
1852
1853        return 0;
1854}
1855
1856static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1857{
1858        int i;
1859
1860        for_each_rx_queue_cnic(bp, i) {
1861                napi_enable(&bnx2x_fp(bp, i, napi));
1862        }
1863}
1864
1865static void bnx2x_napi_enable(struct bnx2x *bp)
1866{
1867        int i;
1868
1869        for_each_eth_queue(bp, i) {
1870                napi_enable(&bnx2x_fp(bp, i, napi));
1871        }
1872}
1873
1874static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1875{
1876        int i;
1877
1878        for_each_rx_queue_cnic(bp, i) {
1879                napi_disable(&bnx2x_fp(bp, i, napi));
1880        }
1881}
1882
1883static void bnx2x_napi_disable(struct bnx2x *bp)
1884{
1885        int i;
1886
1887        for_each_eth_queue(bp, i) {
1888                napi_disable(&bnx2x_fp(bp, i, napi));
1889        }
1890}
1891
1892void bnx2x_netif_start(struct bnx2x *bp)
1893{
1894        if (netif_running(bp->dev)) {
1895                bnx2x_napi_enable(bp);
1896                if (CNIC_LOADED(bp))
1897                        bnx2x_napi_enable_cnic(bp);
1898                bnx2x_int_enable(bp);
1899                if (bp->state == BNX2X_STATE_OPEN)
1900                        netif_tx_wake_all_queues(bp->dev);
1901        }
1902}
1903
1904void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1905{
1906        bnx2x_int_disable_sync(bp, disable_hw);
1907        bnx2x_napi_disable(bp);
1908        if (CNIC_LOADED(bp))
1909                bnx2x_napi_disable_cnic(bp);
1910}
1911
1912u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1913                       struct net_device *sb_dev,
1914                       select_queue_fallback_t fallback)
1915{
1916        struct bnx2x *bp = netdev_priv(dev);
1917
1918        if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1919                struct ethhdr *hdr = (struct ethhdr *)skb->data;
1920                u16 ether_type = ntohs(hdr->h_proto);
1921
1922                /* Skip VLAN tag if present */
1923                if (ether_type == ETH_P_8021Q) {
1924                        struct vlan_ethhdr *vhdr =
1925                                (struct vlan_ethhdr *)skb->data;
1926
1927                        ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1928                }
1929
1930                /* If ethertype is FCoE or FIP - use FCoE ring */
1931                if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1932                        return bnx2x_fcoe_tx(bp, txq_index);
1933        }
1934
1935        /* select a non-FCoE queue */
1936        return fallback(dev, skb, NULL) %
1937               (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1938}
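
/* Example: with 4 ETH queues and max_cos = 3, non-FCoE traffic is hashed
 * by the fallback into Tx queue indices 0..11, while FCoE/FIP frames are
 * steered to the dedicated FCoE Tx queue that lives past the ETH range
 * whenever the FCoE ring is loaded.
 */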
1939
1940void bnx2x_set_num_queues(struct bnx2x *bp)
1941{
1942        /* RSS queues */
1943        bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1944
1945        /* override in STORAGE SD modes */
1946        if (IS_MF_STORAGE_ONLY(bp))
1947                bp->num_ethernet_queues = 1;
1948
1949        /* Add special queues */
1950        bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1951        bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1952
1953        BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1954}
1955
1956/**
1957 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1958 *
1959 * @bp:         Driver handle
1960 *
1961 * We currently support at most 16 Tx queues per CoS, thus we allocate a
1962 * multiple of 16 ETH L2 rings according to the value of
1963 * bp->max_cos.
1964 *
1965 * If there is an FCoE L2 queue, its Tx queue will get the next
1966 * index after all ETH L2 indices.
1967 *
1968 * If the actual number of Tx queues (per CoS) is less than 16, then there
1969 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1970 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1971 *
1972 * The proper configuration of skb->queue_mapping is handled by
1973 * bnx2x_select_queue() and __skb_tx_hash().
1974 *
1975 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1976 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1977 */
1978static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1979{
1980        int rc, tx, rx;
1981
1982        tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1983        rx = BNX2X_NUM_ETH_QUEUES(bp);
1984
1985        /* account for fcoe queue */
1986        if (include_cnic && !NO_FCOE(bp)) {
1987                rx++;
1988                tx++;
1989        }
1990
1991        rc = netif_set_real_num_tx_queues(bp->dev, tx);
1992        if (rc) {
1993                BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1994                return rc;
1995        }
1996        rc = netif_set_real_num_rx_queues(bp->dev, rx);
1997        if (rc) {
1998                BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1999                return rc;
2000        }
2001
2002        DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2003                          tx, rx);
2004
2005        return rc;
2006}
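
/* Example: 8 ETH queues with max_cos = 3 plus an FCoE L2 queue yield
 * tx = 8 * 3 + 1 = 25 and rx = 8 + 1 = 9 real queues reported to the
 * network stack.
 */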
2007
2008static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2009{
2010        int i;
2011
2012        for_each_queue(bp, i) {
2013                struct bnx2x_fastpath *fp = &bp->fp[i];
2014                u32 mtu;
2015
2016                /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2017                if (IS_FCOE_IDX(i))
2018                        /*
2019                         * Although no IP frames are expected to arrive on
2020                         * this ring, we still want to add an
2021                         * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2022                         * overrun attack.
2023                         */
2024                        mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2025                else
2026                        mtu = bp->dev->mtu;
2027                fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2028                                  IP_HEADER_ALIGNMENT_PADDING +
2029                                  ETH_OVERHEAD +
2030                                  mtu +
2031                                  BNX2X_FW_RX_ALIGN_END;
2032                fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2033                /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2034                if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2035                        fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2036                else
2037                        fp->rx_frag_size = 0;
2038        }
2039}
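
/* Rough example (exact constants live in the driver headers): for a
 * 1500-byte MTU the buffer is sized as
 *   BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD
 *   + 1500 + BNX2X_FW_RX_ALIGN_END
 * rounded up by SKB_DATA_ALIGN(); together with NET_SKB_PAD this normally
 * fits in a single 4K page, so rx_frag_size is set and buffers come from
 * the page-fragment allocator. For jumbo MTUs the sum can exceed
 * PAGE_SIZE, rx_frag_size stays 0 and bnx2x_frag_alloc() uses kmalloc().
 */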
2040
2041static int bnx2x_init_rss(struct bnx2x *bp)
2042{
2043        int i;
2044        u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2045
2046        /* Prepare the initial contents for the indirection table if RSS is
2047         * enabled
2048         */
2049        for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2050                bp->rss_conf_obj.ind_table[i] =
2051                        bp->fp->cl_id +
2052                        ethtool_rxfh_indir_default(i, num_eth_queues);
2053
2054        /*
2055         * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2056         * per-port, so if explicit configuration is needed, do it only
2057         * for a PMF.
2058         *
2059         * For 57712 and newer on the other hand it's a per-function
2060         * configuration.
2061         */
2062        return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2063}
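
/* Example: with 4 ETH queues ethtool_rxfh_indir_default() distributes the
 * indirection table round-robin, so ind_table[] becomes
 * cl_id + 0, cl_id + 1, cl_id + 2, cl_id + 3, cl_id + 0, ... across all
 * of its entries.
 */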
2064
2065int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2066              bool config_hash, bool enable)
2067{
2068        struct bnx2x_config_rss_params params = {NULL};
2069
2070        /* Although RSS is meaningless when there is a single HW queue we
2071         * still need it enabled in order to have HW Rx hash generated.
2072         *
2073         * if (!is_eth_multi(bp))
2074         *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2075         */
2076
2077        params.rss_obj = rss_obj;
2078
2079        __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2080
2081        if (enable) {
2082                __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2083
2084                /* RSS configuration */
2085                __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2086                __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2087                __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2088                __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2089                if (rss_obj->udp_rss_v4)
2090                        __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2091                if (rss_obj->udp_rss_v6)
2092                        __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2093
2094                if (!CHIP_IS_E1x(bp)) {
2095                        /* valid only for TUNN_MODE_VXLAN tunnel mode */
2096                        __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2097                        __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2098
2099                        /* valid only for TUNN_MODE_GRE tunnel mode */
2100                        __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2101                }
2102        } else {
2103                __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2104        }
2105
2106        /* Hash bits */
2107        params.rss_result_mask = MULTI_MASK;
2108
2109        memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2110
2111        if (config_hash) {
2112                /* RSS keys */
2113                netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2114                __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2115        }
2116
2117        if (IS_PF(bp))
2118                return bnx2x_config_rss(bp, &params);
2119        else
2120                return bnx2x_vfpf_config_rss(bp, &params);
2121}
2122
2123static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2124{
2125        struct bnx2x_func_state_params func_params = {NULL};
2126
2127        /* Prepare parameters for function state transitions */
2128        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2129
2130        func_params.f_obj = &bp->func_obj;
2131        func_params.cmd = BNX2X_F_CMD_HW_INIT;
2132
2133        func_params.params.hw_init.load_phase = load_code;
2134
2135        return bnx2x_func_state_change(bp, &func_params);
2136}
2137
2138/*
2139 * Cleans objects that have internal lists without sending
2140 * ramrods. Should be run with interrupts disabled.
2141 */
2142void bnx2x_squeeze_objects(struct bnx2x *bp)
2143{
2144        int rc;
2145        unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2146        struct bnx2x_mcast_ramrod_params rparam = {NULL};
2147        struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2148
2149        /***************** Cleanup MACs' object first *************************/
2150
2151        /* Wait for completion of the requested commands */
2152        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2153        /* Perform a dry cleanup */
2154        __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2155
2156        /* Clean ETH primary MAC */
2157        __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2158        rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2159                                 &ramrod_flags);
2160        if (rc != 0)
2161                BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2162
2163        /* Cleanup UC list */
2164        vlan_mac_flags = 0;
2165        __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2166        rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2167                                 &ramrod_flags);
2168        if (rc != 0)
2169                BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2170
2171        /***************** Now clean mcast object *****************************/
2172        rparam.mcast_obj = &bp->mcast_obj;
2173        __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2174
2175        /* Add a DEL command... - Since we're doing a driver cleanup only,
2176         * we take a lock surrounding both the initial send and the CONTs,
2177         * as we don't want a true completion to disrupt us in the middle.
2178         */
2179        netif_addr_lock_bh(bp->dev);
2180        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2181        if (rc < 0)
2182                BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2183                          rc);
2184
2185        /* ...and wait until all pending commands are cleared */
2186        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2187        while (rc != 0) {
2188                if (rc < 0) {
2189                        BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2190                                  rc);
2191                        netif_addr_unlock_bh(bp->dev);
2192                        return;
2193                }
2194
2195                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2196        }
2197        netif_addr_unlock_bh(bp->dev);
2198}
2199
2200#ifndef BNX2X_STOP_ON_ERROR
2201#define LOAD_ERROR_EXIT(bp, label) \
2202        do { \
2203                (bp)->state = BNX2X_STATE_ERROR; \
2204                goto label; \
2205        } while (0)
2206
2207#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2208        do { \
2209                (bp)->cnic_loaded = false; \
2210                goto label; \
2211        } while (0)
2212#else /*BNX2X_STOP_ON_ERROR*/
2213#define LOAD_ERROR_EXIT(bp, label) \
2214        do { \
2215                (bp)->state = BNX2X_STATE_ERROR; \
2216                (bp)->panic = 1; \
2217                return -EBUSY; \
2218        } while (0)
2219#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2220        do { \
2221                (bp)->cnic_loaded = false; \
2222                (bp)->panic = 1; \
2223                return -EBUSY; \
2224        } while (0)
2225#endif /*BNX2X_STOP_ON_ERROR*/
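
/* In a regular build the LOAD_ERROR_EXIT* macros jump to the matching
 * unwind label at the bottom of the load path so resources get released;
 * with BNX2X_STOP_ON_ERROR they instead mark the driver as panicked and
 * return -EBUSY immediately so the failure state can be inspected.
 */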
2226
2227static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2228{
2229        BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2230                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2231        return;
2232}
2233
2234static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2235{
2236        int num_groups, vf_headroom = 0;
2237        int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2238
2239        /* number of queues for statistics is number of eth queues + FCoE */
2240        u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2241
2242        /* Total number of FW statistics requests =
2243         * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2244         * and fcoe l2 queue) stats + num of queues (which includes another 1
2245         * for fcoe l2 queue if applicable)
2246         */
2247        bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2248
2249        /* vf stats appear in the request list, but their data is allocated by
2250         * the VFs themselves. We don't include them in the bp->fw_stats_num as
2251         * it is used to determine where to place the vf stats queries in the
2252         * request struct
2253         */
2254        if (IS_SRIOV(bp))
2255                vf_headroom = bnx2x_vf_headroom(bp);
2256
2257        /* Request is built from stats_query_header and an array of
2258         * stats_query_cmd_group each of which contains
2259         * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2260         * configured in the stats_query_header.
2261         */
2262        num_groups =
2263                (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2264                 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2265                 1 : 0));
2266
2267        DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2268           bp->fw_stats_num, vf_headroom, num_groups);
2269        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2270                num_groups * sizeof(struct stats_query_cmd_group);
2271
2272        /* Data for statistics requests + stats_counter
2273         * stats_counter holds per-STORM counters that are incremented
2274         * when STORM has finished with the current request.
2275         * Memory for FCoE offloaded statistics is counted anyway,
2276         * even if it will not be sent.
2277         * VF stats are not accounted for here as the data of VF stats is stored
2278         * in memory allocated by the VF, not here.
2279         */
2280        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2281                sizeof(struct per_pf_stats) +
2282                sizeof(struct fcoe_statistics_params) +
2283                sizeof(struct per_queue_stats) * num_queue_stats +
2284                sizeof(struct stats_counter);
2285
2286        bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2287                                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2288        if (!bp->fw_stats)
2289                goto alloc_mem_err;
2290
2291        /* Set shortcuts */
2292        bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2293        bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2294        bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2295                ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2296        bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2297                bp->fw_stats_req_sz;
2298
2299        DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2300           U64_HI(bp->fw_stats_req_mapping),
2301           U64_LO(bp->fw_stats_req_mapping));
2302        DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2303           U64_HI(bp->fw_stats_data_mapping),
2304           U64_LO(bp->fw_stats_data_mapping));
2305        return 0;
2306
2307alloc_mem_err:
2308        bnx2x_free_fw_stats_mem(bp);
2309        BNX2X_ERR("Can't allocate FW stats memory\n");
2310        return -ENOMEM;
2311}
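
/* Example (assuming STATS_QUERY_CMD_COUNT is 16): a PF with 8 ETH queues
 * and FCoE enabled has fw_stats_num = 2 + 1 + 9 = 12, so one
 * stats_query_cmd_group suffices; once VF headroom pushes the total past
 * 16 a second group is needed and fw_stats_req_sz grows accordingly.
 */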
2312
2313/* send load request to mcp and analyze response */
2314static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2315{
2316        u32 param;
2317
2318        /* init fw_seq */
2319        bp->fw_seq =
2320                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2321                 DRV_MSG_SEQ_NUMBER_MASK);
2322        BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2323
2324        /* Get current FW pulse sequence */
2325        bp->fw_drv_pulse_wr_seq =
2326                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2327                 DRV_PULSE_SEQ_MASK);
2328        BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2329
2330        param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2331
2332        if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2333                param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2334
2335        /* load request */
2336        (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2337
2338        /* if mcp fails to respond we must abort */
2339        if (!(*load_code)) {
2340                BNX2X_ERR("MCP response failure, aborting\n");
2341                return -EBUSY;
2342        }
2343
2344        /* If mcp refused (e.g. other port is in diagnostic mode) we
2345         * must abort
2346         */
2347        if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2348                BNX2X_ERR("MCP refused load request, aborting\n");
2349                return -EBUSY;
2350        }
2351        return 0;
2352}
2353
2354/* check whether another PF has already loaded FW to the chip. In
2355 * virtualized environments a PF from another VM may have already
2356 * initialized the device, including loading the FW
2357 */
2358int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2359{
2360        /* is another pf loaded on this engine? */
2361        if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2362            load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2363                /* build my FW version dword */
2364                u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2365                        (BCM_5710_FW_MINOR_VERSION << 8) +
2366                        (BCM_5710_FW_REVISION_VERSION << 16) +
2367                        (BCM_5710_FW_ENGINEERING_VERSION << 24);
2368
2369                /* read loaded FW from chip */
2370                u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2371
2372                DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2373                   loaded_fw, my_fw);
2374
2375                /* abort nic load if version mismatch */
2376                if (my_fw != loaded_fw) {
2377                        if (print_err)
2378                                BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2379                                          loaded_fw, my_fw);
2380                        else
2381                                BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2382                                               loaded_fw, my_fw);
2383                        return -EBUSY;
2384                }
2385        }
2386        return 0;
2387}
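
/* The version dword compared above packs the FW version as: major in
 * bits 7:0, minor in 15:8, revision in 23:16 and engineering version in
 * 31:24 - the same layout read back from XSEM_REG_PRAM for the
 * already-loaded FW.
 */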
2388
2389/* returns the "mcp load_code" according to global load_count array */
2390static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2391{
2392        int path = BP_PATH(bp);
2393
2394        DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2395           path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2396           bnx2x_load_count[path][2]);
2397        bnx2x_load_count[path][0]++;
2398        bnx2x_load_count[path][1 + port]++;
2399        DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2400           path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2401           bnx2x_load_count[path][2]);
2402        if (bnx2x_load_count[path][0] == 1)
2403                return FW_MSG_CODE_DRV_LOAD_COMMON;
2404        else if (bnx2x_load_count[path][1 + port] == 1)
2405                return FW_MSG_CODE_DRV_LOAD_PORT;
2406        else
2407                return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2408}
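
/* Without an MCP the driver does its own bookkeeping:
 * bnx2x_load_count[path][0] counts every function loaded on the path and
 * [1 + port] counts the functions loaded on each port. The first load on
 * a path is treated as LOAD_COMMON, the first on a port as LOAD_PORT and
 * every other function as LOAD_FUNCTION.
 */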
2409
2410/* mark PMF if applicable */
2411static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2412{
2413        if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2414            (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2415            (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2416                bp->port.pmf = 1;
2417                /* We need the barrier to ensure the ordering between the
2418                 * writing to bp->port.pmf here and reading it from the
2419                 * bnx2x_periodic_task().
2420                 */
2421                smp_mb();
2422        } else {
2423                bp->port.pmf = 0;
2424        }
2425
2426        DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2427}
2428
2429static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2430{
2431        if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2432             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2433            (bp->common.shmem2_base)) {
2434                if (SHMEM2_HAS(bp, dcc_support))
2435                        SHMEM2_WR(bp, dcc_support,
2436                                  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2437                                   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2438                if (SHMEM2_HAS(bp, afex_driver_support))
2439                        SHMEM2_WR(bp, afex_driver_support,
2440                                  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2441        }
2442
2443        /* Set AFEX default VLAN tag to an invalid value */
2444        bp->afex_def_vlan_tag = -1;
2445}
2446
2447/**
2448 * bnx2x_bz_fp - zero content of the fastpath structure.
2449 *
2450 * @bp:         driver handle
2451 * @index:      fastpath index to be zeroed
2452 *
2453 * Makes sure the contents of the bp->fp[index].napi is kept
2454 * intact.
2455 */
2456static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2457{
2458        struct bnx2x_fastpath *fp = &bp->fp[index];
2459        int cos;
2460        struct napi_struct orig_napi = fp->napi;
2461        struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2462
2463        /* bzero bnx2x_fastpath contents */
2464        if (fp->tpa_info)
2465                memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2466                       sizeof(struct bnx2x_agg_info));
2467        memset(fp, 0, sizeof(*fp));
2468
2469        /* Restore the NAPI object as it has been already initialized */
2470        fp->napi = orig_napi;
2471        fp->tpa_info = orig_tpa_info;
2472        fp->bp = bp;
2473        fp->index = index;
2474        if (IS_ETH_FP(fp))
2475                fp->max_cos = bp->max_cos;
2476        else
2477                /* Special queues support only one CoS */
2478                fp->max_cos = 1;
2479
2480        /* Init txdata pointers */
2481        if (IS_FCOE_FP(fp))
2482                fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2483        if (IS_ETH_FP(fp))
2484                for_each_cos_in_tx_queue(fp, cos)
2485                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2486                                BNX2X_NUM_ETH_QUEUES(bp) + index];
2487
2488        /* set the tpa flag for each queue. The tpa flag determines the queue
2489         * minimal size so it must be set prior to queue memory allocation
2490         */
2491        if (bp->dev->features & NETIF_F_LRO)
2492                fp->mode = TPA_MODE_LRO;
2493        else if (bp->dev->features & NETIF_F_GRO_HW)
2494                fp->mode = TPA_MODE_GRO;
2495        else
2496                fp->mode = TPA_MODE_DISABLED;
2497
2498        /* We don't want TPA if it's disabled in bp
2499         * or if this is an FCoE L2 ring.
2500         */
2501        if (bp->disable_tpa || IS_FCOE_FP(fp))
2502                fp->mode = TPA_MODE_DISABLED;
2503}
2504
2505void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2506{
2507        u32 cur;
2508
2509        if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2510                return;
2511
2512        cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2513        DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2514           cur, state);
2515
2516        SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2517}
2518
2519int bnx2x_load_cnic(struct bnx2x *bp)
2520{
2521        int i, rc, port = BP_PORT(bp);
2522
2523        DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2524
2525        mutex_init(&bp->cnic_mutex);
2526
2527        if (IS_PF(bp)) {
2528                rc = bnx2x_alloc_mem_cnic(bp);
2529                if (rc) {
2530                        BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2531                        LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2532                }
2533        }
2534
2535        rc = bnx2x_alloc_fp_mem_cnic(bp);
2536        if (rc) {
2537                BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2538                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2539        }
2540
2541        /* Update the number of queues with the cnic queues */
2542        rc = bnx2x_set_real_num_queues(bp, 1);
2543        if (rc) {
2544                BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2545                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2546        }
2547
2548        /* Add all CNIC NAPI objects */
2549        bnx2x_add_all_napi_cnic(bp);
2550        DP(NETIF_MSG_IFUP, "cnic napi added\n");
2551        bnx2x_napi_enable_cnic(bp);
2552
2553        rc = bnx2x_init_hw_func_cnic(bp);
2554        if (rc)
2555                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2556
2557        bnx2x_nic_init_cnic(bp);
2558
2559        if (IS_PF(bp)) {
2560                /* Enable Timer scan */
2561                REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2562
2563                /* setup cnic queues */
2564                for_each_cnic_queue(bp, i) {
2565                        rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2566                        if (rc) {
2567                                BNX2X_ERR("Queue setup failed\n");
2568                                LOAD_ERROR_EXIT(bp, load_error_cnic2);
2569                        }
2570                }
2571        }
2572
2573        /* Initialize Rx filter. */
2574        bnx2x_set_rx_mode_inner(bp);
2575
2576        /* re-read iscsi info */
2577        bnx2x_get_iscsi_info(bp);
2578        bnx2x_setup_cnic_irq_info(bp);
2579        bnx2x_setup_cnic_info(bp);
2580        bp->cnic_loaded = true;
2581        if (bp->state == BNX2X_STATE_OPEN)
2582                bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2583
2584        DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2585
2586        return 0;
2587
2588#ifndef BNX2X_STOP_ON_ERROR
2589load_error_cnic2:
2590        /* Disable Timer scan */
2591        REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2592
2593load_error_cnic1:
2594        bnx2x_napi_disable_cnic(bp);
2595        /* Update the number of queues without the cnic queues */
2596        if (bnx2x_set_real_num_queues(bp, 0))
2597                BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2598load_error_cnic0:
2599        BNX2X_ERR("CNIC-related load failed\n");
2600        bnx2x_free_fp_mem_cnic(bp);
2601        bnx2x_free_mem_cnic(bp);
2602        return rc;
2603#endif /* ! BNX2X_STOP_ON_ERROR */
2604}
2605
2606/* must be called with rtnl_lock */
2607int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2608{
2609        int port = BP_PORT(bp);
2610        int i, rc = 0, load_code = 0;
2611
2612        DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2613        DP(NETIF_MSG_IFUP,
2614           "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2615
2616#ifdef BNX2X_STOP_ON_ERROR
2617        if (unlikely(bp->panic)) {
2618                BNX2X_ERR("Can't load NIC when there is panic\n");
2619                return -EPERM;
2620        }
2621#endif
2622
2623        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2624
2625        /* zero the structure w/o any lock, before SP handler is initialized */
2626        memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2627        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2628                &bp->last_reported_link.link_report_flags);
2629
2630        if (IS_PF(bp))
2631                /* must be called before memory allocation and HW init */
2632                bnx2x_ilt_set_info(bp);
2633
2634        /*
2635         * Zero the fastpath structures while preserving the invariants that
2636         * are set up only once: napi, fp index, max_cos and the bp pointer.
2637         * Also set fp->mode and txdata_ptr.
2638         */
2639        DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2640        for_each_queue(bp, i)
2641                bnx2x_bz_fp(bp, i);
2642        memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2643                                  bp->num_cnic_queues) *
2644                                  sizeof(struct bnx2x_fp_txdata));
2645
2646        bp->fcoe_init = false;
2647
2648        /* Set the receive queues buffer size */
2649        bnx2x_set_rx_buf_size(bp);
2650
2651        if (IS_PF(bp)) {
2652                rc = bnx2x_alloc_mem(bp);
2653                if (rc) {
2654                        BNX2X_ERR("Unable to allocate bp memory\n");
2655                        return rc;
2656                }
2657        }
2658
2659        /* This needs to be done after the memory allocation, since it
2660         * self-adjusts to the amount of memory available for RSS queues
2661         */
2662        rc = bnx2x_alloc_fp_mem(bp);
2663        if (rc) {
2664                BNX2X_ERR("Unable to allocate memory for fps\n");
2665                LOAD_ERROR_EXIT(bp, load_error0);
2666        }
2667
2668        /* Allocate memory for FW statistics */
2669        if (bnx2x_alloc_fw_stats_mem(bp))
2670                LOAD_ERROR_EXIT(bp, load_error0);
2671
2672        /* request pf to initialize status blocks */
2673        if (IS_VF(bp)) {
2674                rc = bnx2x_vfpf_init(bp);
2675                if (rc)
2676                        LOAD_ERROR_EXIT(bp, load_error0);
2677        }
2678
2679        /* Since bnx2x_alloc_mem() may update
2680         * bp->num_queues, bnx2x_set_real_num_queues() should always
2681         * come after it. At this stage cnic queues are not counted.
2682         */
2683        rc = bnx2x_set_real_num_queues(bp, 0);
2684        if (rc) {
2685                BNX2X_ERR("Unable to set real_num_queues\n");
2686                LOAD_ERROR_EXIT(bp, load_error0);
2687        }
2688
2689        /* Configure multi-CoS mappings in the kernel.
2690         * This configuration may be overridden by a multi-class queue
2691         * discipline or by a DCBX negotiation result.
2692         */
2693        bnx2x_setup_tc(bp->dev, bp->max_cos);
2694
2695        /* Add all NAPI objects */
2696        bnx2x_add_all_napi(bp);
2697        DP(NETIF_MSG_IFUP, "napi added\n");
2698        bnx2x_napi_enable(bp);
2699
2700        if (IS_PF(bp)) {
2701                /* set pf load just before approaching the MCP */
2702                bnx2x_set_pf_load(bp);
2703
2704                /* if mcp exists send load request and analyze response */
2705                if (!BP_NOMCP(bp)) {
2706                        /* attempt to load pf */
2707                        rc = bnx2x_nic_load_request(bp, &load_code);
2708                        if (rc)
2709                                LOAD_ERROR_EXIT(bp, load_error1);
2710
2711                        /* what did mcp say? */
2712                        rc = bnx2x_compare_fw_ver(bp, load_code, true);
2713                        if (rc) {
2714                                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2715                                LOAD_ERROR_EXIT(bp, load_error2);
2716                        }
2717                } else {
2718                        load_code = bnx2x_nic_load_no_mcp(bp, port);
2719                }
2720
2721                /* mark pmf if applicable */
2722                bnx2x_nic_load_pmf(bp, load_code);
2723
2724                /* Init Function state controlling object */
2725                bnx2x__init_func_obj(bp);
2726
2727                /* Initialize HW */
2728                rc = bnx2x_init_hw(bp, load_code);
2729                if (rc) {
2730                        BNX2X_ERR("HW init failed, aborting\n");
2731                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2732                        LOAD_ERROR_EXIT(bp, load_error2);
2733                }
2734        }
2735
2736        bnx2x_pre_irq_nic_init(bp);
2737
2738        /* Connect to IRQs */
2739        rc = bnx2x_setup_irqs(bp);
2740        if (rc) {
2741                BNX2X_ERR("setup irqs failed\n");
2742                if (IS_PF(bp))
2743                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2744                LOAD_ERROR_EXIT(bp, load_error2);
2745        }
2746
2747        /* Init per-function objects */
2748        if (IS_PF(bp)) {
2749                /* Setup NIC internals and enable interrupts */
2750                bnx2x_post_irq_nic_init(bp, load_code);
2751
2752                bnx2x_init_bp_objs(bp);
2753                bnx2x_iov_nic_init(bp);
2754
2755                /* Set AFEX default VLAN tag to an invalid value */
2756                bp->afex_def_vlan_tag = -1;
2757                bnx2x_nic_load_afex_dcc(bp, load_code);
2758                bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2759                rc = bnx2x_func_start(bp);
2760                if (rc) {
2761                        BNX2X_ERR("Function start failed!\n");
2762                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2763
2764                        LOAD_ERROR_EXIT(bp, load_error3);
2765                }
2766
2767                /* Send LOAD_DONE command to MCP */
2768                if (!BP_NOMCP(bp)) {
2769                        load_code = bnx2x_fw_command(bp,
2770                                                     DRV_MSG_CODE_LOAD_DONE, 0);
2771                        if (!load_code) {
2772                                BNX2X_ERR("MCP response failure, aborting\n");
2773                                rc = -EBUSY;
2774                                LOAD_ERROR_EXIT(bp, load_error3);
2775                        }
2776                }
2777
2778                /* initialize FW coalescing state machines in RAM */
2779                bnx2x_update_coalesce(bp);
2780        }
2781
2782        /* setup the leading queue */
2783        rc = bnx2x_setup_leading(bp);
2784        if (rc) {
2785                BNX2X_ERR("Setup leading failed!\n");
2786                LOAD_ERROR_EXIT(bp, load_error3);
2787        }
2788
2789        /* set up the rest of the queues */
2790        for_each_nondefault_eth_queue(bp, i) {
2791                if (IS_PF(bp))
2792                        rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2793                else /* VF */
2794                        rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2795                if (rc) {
2796                        BNX2X_ERR("Queue %d setup failed\n", i);
2797                        LOAD_ERROR_EXIT(bp, load_error3);
2798                }
2799        }
2800
2801        /* setup rss */
2802        rc = bnx2x_init_rss(bp);
2803        if (rc) {
2804                BNX2X_ERR("PF RSS init failed\n");
2805                LOAD_ERROR_EXIT(bp, load_error3);
2806        }
2807
2808        /* Now that the clients are configured we are ready to work */
2809        bp->state = BNX2X_STATE_OPEN;
2810
2811        /* Configure a ucast MAC */
2812        if (IS_PF(bp))
2813                rc = bnx2x_set_eth_mac(bp, true);
2814        else /* vf */
2815                rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2816                                           true);
2817        if (rc) {
2818                BNX2X_ERR("Setting Ethernet MAC failed\n");
2819                LOAD_ERROR_EXIT(bp, load_error3);
2820        }
2821
2822        if (IS_PF(bp) && bp->pending_max) {
2823                bnx2x_update_max_mf_config(bp, bp->pending_max);
2824                bp->pending_max = 0;
2825        }
2826
2827        bp->force_link_down = false;
2828        if (bp->port.pmf) {
2829                rc = bnx2x_initial_phy_init(bp, load_mode);
2830                if (rc)
2831                        LOAD_ERROR_EXIT(bp, load_error3);
2832        }
2833        bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2834
2835        /* Start fast path */
2836
2837        /* Re-configure vlan filters */
2838        rc = bnx2x_vlan_reconfigure_vid(bp);
2839        if (rc)
2840                LOAD_ERROR_EXIT(bp, load_error3);
2841
2842        /* Initialize Rx filter. */
2843        bnx2x_set_rx_mode_inner(bp);
2844
2845        if (bp->flags & PTP_SUPPORTED) {
2846                bnx2x_init_ptp(bp);
2847                bnx2x_configure_ptp_filters(bp);
2848        }
2849        /* Start Tx */
2850        switch (load_mode) {
2851        case LOAD_NORMAL:
2852                /* Tx queues should only be re-enabled */
2853                netif_tx_wake_all_queues(bp->dev);
2854                break;
2855
2856        case LOAD_OPEN:
2857                netif_tx_start_all_queues(bp->dev);
2858                smp_mb__after_atomic();
2859                break;
2860
2861        case LOAD_DIAG:
2862        case LOAD_LOOPBACK_EXT:
2863                bp->state = BNX2X_STATE_DIAG;
2864                break;
2865
2866        default:
2867                break;
2868        }
2869
2870        if (bp->port.pmf)
2871                bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2872        else
2873                bnx2x__link_status_update(bp);
2874
2875        /* start the timer */
2876        mod_timer(&bp->timer, jiffies + bp->current_interval);
2877
2878        if (CNIC_ENABLED(bp))
2879                bnx2x_load_cnic(bp);
2880
2881        if (IS_PF(bp))
2882                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2883
2884        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2885                /* mark driver is loaded in shmem2 */
2886                u32 val;
2887                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2888                val &= ~DRV_FLAGS_MTU_MASK;
2889                val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2890                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2891                          val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2892                          DRV_FLAGS_CAPABILITIES_LOADED_L2);
2893        }
2894
2895        /* Wait for all pending SP commands to complete */
2896        if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2897                BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2898                bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2899                return -EBUSY;
2900        }
2901
2902        /* Update driver data for On-Chip MFW dump. */
2903        if (IS_PF(bp))
2904                bnx2x_update_mfw_dump(bp);
2905
2906        /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2907        if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2908                bnx2x_dcbx_init(bp, false);
2909
2910        if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2911                bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2912
2913        DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2914
2915        return 0;
2916
2917#ifndef BNX2X_STOP_ON_ERROR
2918load_error3:
2919        if (IS_PF(bp)) {
2920                bnx2x_int_disable_sync(bp, 1);
2921
2922                /* Clean queueable objects */
2923                bnx2x_squeeze_objects(bp);
2924        }
2925
2926        /* Free SKBs, SGEs, TPA pool and driver internals */
2927        bnx2x_free_skbs(bp);
2928        for_each_rx_queue(bp, i)
2929                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2930
2931        /* Release IRQs */
2932        bnx2x_free_irq(bp);
2933load_error2:
2934        if (IS_PF(bp) && !BP_NOMCP(bp)) {
2935                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2936                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2937        }
2938
2939        bp->port.pmf = 0;
2940load_error1:
2941        bnx2x_napi_disable(bp);
2942        bnx2x_del_all_napi(bp);
2943
2944        /* clear pf_load status, as it was already set */
2945        if (IS_PF(bp))
2946                bnx2x_clear_pf_load(bp);
2947load_error0:
2948        bnx2x_free_fw_stats_mem(bp);
2949        bnx2x_free_fp_mem(bp);
2950        bnx2x_free_mem(bp);
2951
2952        return rc;
2953#endif /* ! BNX2X_STOP_ON_ERROR */
2954}
2955
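    /* Drain the Tx fastpath: every CoS ring of every Tx queue is flushed
     * through bnx2x_clean_tx_queue(), and an error reported by it aborts
     * the drain.
     */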
2956int bnx2x_drain_tx_queues(struct bnx2x *bp)
2957{
2958        u8 rc = 0, cos, i;
2959
2960        /* Wait until tx fastpath tasks complete */
2961        for_each_tx_queue(bp, i) {
2962                struct bnx2x_fastpath *fp = &bp->fp[i];
2963
2964                for_each_cos_in_tx_queue(fp, cos)
2965                        rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2966                if (rc)
2967                        return rc;
2968        }
2969        return 0;
2970}
2971
2972/* must be called with rtnl_lock */
2973int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2974{
2975        int i;
2976        bool global = false;
2977
2978        DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2979
2980        if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2981                bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2982
2983        /* mark driver is unloaded in shmem2 */
2984        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2985                u32 val;
2986                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2987                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2988                          val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2989        }
2990
2991        if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2992            (bp->state == BNX2X_STATE_CLOSED ||
2993             bp->state == BNX2X_STATE_ERROR)) {
2994                /* We can get here if the driver has been unloaded
2995                 * during parity error recovery and is either waiting for a
2996                 * leader to complete or for other functions to unload and
2997                 * then ifdown has been issued. In this case we want to
2998                 * unload and let other functions to complete a recovery
2999                 * process.
3000                 */
3001                bp->recovery_state = BNX2X_RECOVERY_DONE;
3002                bp->is_leader = 0;
3003                bnx2x_release_leader_lock(bp);
3004                smp_mb();
3005
3006                DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3007                BNX2X_ERR("Can't unload in closed or error state\n");
3008                return -EINVAL;
3009        }
3010
3011        /* Nothing to do during unload if the previous bnx2x_nic_load()
3012         * has not completed successfully - all resources are released.
3013         *
3014         * We can get here only after an unsuccessful ndo_* callback, during
3015         * which the dev->IFF_UP flag is still on.
3016         */
3017        if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3018                return 0;
3019
3020        /* It's important to set bp->state to a value different from
3021         * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3022         * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3023         */
3024        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3025        smp_mb();
3026
3027        /* indicate to VFs that the PF is going down */
3028        bnx2x_iov_channel_down(bp);
3029
3030        if (CNIC_LOADED(bp))
3031                bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3032
3033        /* Stop Tx */
3034        bnx2x_tx_disable(bp);
3035        netdev_reset_tc(bp->dev);
3036
3037        bp->rx_mode = BNX2X_RX_MODE_NONE;
3038
3039        del_timer_sync(&bp->timer);
3040
3041        if (IS_PF(bp) && !BP_NOMCP(bp)) {
3042                /* Set ALWAYS_ALIVE bit in shmem */
3043                bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3044                bnx2x_drv_pulse(bp);
3045                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3046                bnx2x_save_statistics(bp);
3047        }
3048
3049        /* wait till consumers catch up with producers in all queues.
3050         * If we're recovering, FW can't write to host so no reason
3051         * to wait for the queues to complete all Tx.
3052         */
3053        if (unload_mode != UNLOAD_RECOVERY)
3054                bnx2x_drain_tx_queues(bp);
3055
3056        /* If VF, indicate to the PF that this function is going down
3057         * (the PF will delete SP elements and clear initializations).
3058         */
3059        if (IS_VF(bp))
3060                bnx2x_vfpf_close_vf(bp);
3061        else if (unload_mode != UNLOAD_RECOVERY)
3062                /* if this is a normal/close unload, we need to clean up the chip */
3063                bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3064        else {
3065                /* Send the UNLOAD_REQUEST to the MCP */
3066                bnx2x_send_unload_req(bp, unload_mode);
3067
3068                /* Prevent transactions to the host from the functions on the
3069                 * engine that doesn't reset global blocks in case of global
3070                 * attention once global blocks are reset and gates are opened
3071                 * (the engine whose leader will perform the recovery
3072                 * last).
3073                 */
3074                if (!CHIP_IS_E1x(bp))
3075                        bnx2x_pf_disable(bp);
3076
3077                /* Disable HW interrupts, NAPI */
3078                bnx2x_netif_stop(bp, 1);
3079                /* Delete all NAPI objects */
3080                bnx2x_del_all_napi(bp);
3081                if (CNIC_LOADED(bp))
3082                        bnx2x_del_all_napi_cnic(bp);
3083                /* Release IRQs */
3084                bnx2x_free_irq(bp);
3085
3086                /* Report UNLOAD_DONE to MCP */
3087                bnx2x_send_unload_done(bp, false);
3088        }
3089
3090        /*
3091         * At this stage no more interrupts will arrive so we may safely clean
3092         * the queueable objects here in case they failed to get cleaned so far.
3093         */
3094        if (IS_PF(bp))
3095                bnx2x_squeeze_objects(bp);
3096
3097        /* There should be no more pending SP commands at this stage */
3098        bp->sp_state = 0;
3099
3100        bp->port.pmf = 0;
3101
3102        /* clear pending work in rtnl task */
3103        bp->sp_rtnl_state = 0;
3104        smp_mb();
3105
3106        /* Free SKBs, SGEs, TPA pool and driver internals */
3107        bnx2x_free_skbs(bp);
3108        if (CNIC_LOADED(bp))
3109                bnx2x_free_skbs_cnic(bp);
3110        for_each_rx_queue(bp, i)
3111                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3112
3113        bnx2x_free_fp_mem(bp);
3114        if (CNIC_LOADED(bp))
3115                bnx2x_free_fp_mem_cnic(bp);
3116
3117        if (IS_PF(bp)) {
3118                if (CNIC_LOADED(bp))
3119                        bnx2x_free_mem_cnic(bp);
3120        }
3121        bnx2x_free_mem(bp);
3122
3123        bp->state = BNX2X_STATE_CLOSED;
3124        bp->cnic_loaded = false;
3125
3126        /* Clear driver version indication in shmem */
3127        if (IS_PF(bp) && !BP_NOMCP(bp))
3128                bnx2x_update_mng_version(bp);
3129
3130        /* Check if there are pending parity attentions. If there are - set
3131         * RECOVERY_IN_PROGRESS.
3132         */
3133        if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3134                bnx2x_set_reset_in_progress(bp);
3135
3136                /* Set RESET_IS_GLOBAL if needed */
3137                if (global)
3138                        bnx2x_set_reset_global(bp);
3139        }
3140
3141        /* The last driver must disable the "close the gate" functionality if
3142         * there is no parity attention or "process kill" pending.
3143         */
3144        if (IS_PF(bp) &&
3145            !bnx2x_clear_pf_load(bp) &&
3146            bnx2x_reset_is_done(bp, BP_PATH(bp)))
3147                bnx2x_disable_close_the_gate(bp);
3148
3149        DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3150
3151        return 0;
3152}
3153
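    /* Move the device between D0 and D3hot by programming the PCI PM
     * control/status register directly. D3hot is skipped while other
     * functions still hold the device (enable_cnt > 1) and on emulation/FPGA
     * platforms; PME is armed only when WoL is enabled.
     */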
3154int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3155{
3156        u16 pmcsr;
3157
3158        /* If there is no power capability, silently succeed */
3159        if (!bp->pdev->pm_cap) {
3160                BNX2X_DEV_INFO("No power capability. Breaking.\n");
3161                return 0;
3162        }
3163
3164        pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3165
3166        switch (state) {
3167        case PCI_D0:
3168                pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3169                                      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3170                                       PCI_PM_CTRL_PME_STATUS));
3171
3172                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3173                        /* delay required during transition out of D3hot */
3174                        msleep(20);
3175                break;
3176
3177        case PCI_D3hot:
3178                /* If there are other clients above, don't
3179                   shut down the power */
3180                if (atomic_read(&bp->pdev->enable_cnt) != 1)
3181                        return 0;
3182                /* Don't shut down the power for emulation and FPGA */
3183                if (CHIP_REV_IS_SLOW(bp))
3184                        return 0;
3185
3186                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3187                pmcsr |= 3;
3188
3189                if (bp->wol)
3190                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3191
3192                pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3193                                      pmcsr);
3194
3195                /* No more memory access after this point until
3196                 * device is brought back to D0.
3197                 */
3198                break;
3199
3200        default:
3201                dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3202                return -EINVAL;
3203        }
3204        return 0;
3205}
3206
3207/*
3208 * net_device service functions
3209 */
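    /* NAPI poll handler: service Tx completions on every CoS ring of this
     * fastpath, then Rx up to @budget. Interrupts are re-armed via
     * bnx2x_ack_sb() only once napi_complete_done() succeeds and the status
     * block shows no further Rx/Tx work.
     */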
3210static int bnx2x_poll(struct napi_struct *napi, int budget)
3211{
3212        struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3213                                                 napi);
3214        struct bnx2x *bp = fp->bp;
3215        int rx_work_done;
3216        u8 cos;
3217
3218#ifdef BNX2X_STOP_ON_ERROR
3219        if (unlikely(bp->panic)) {
3220                napi_complete(napi);
3221                return 0;
3222        }
3223#endif
3224        for_each_cos_in_tx_queue(fp, cos)
3225                if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3226                        bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3227
3228        rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3229
3230        if (rx_work_done < budget) {
3231                /* No need to update SB for FCoE L2 ring as long as
3232                 * it's connected to the default SB and the SB
3233                 * has been updated when NAPI was scheduled.
3234                 */
3235                if (IS_FCOE_FP(fp)) {
3236                        napi_complete_done(napi, rx_work_done);
3237                } else {
3238                        bnx2x_update_fpsb_idx(fp);
3239                        /* bnx2x_has_rx_work() reads the status block,
3240                         * thus we need to ensure that status block indices
3241                         * have been actually read (bnx2x_update_fpsb_idx)
3242                         * prior to this check (bnx2x_has_rx_work) so that
3243                         * we won't write the "newer" value of the status block
3244                         * to IGU (if there was a DMA right after
3245                         * bnx2x_has_rx_work and if there is no rmb, the memory
3246                         * reading (bnx2x_update_fpsb_idx) may be postponed
3247                         * to right before bnx2x_ack_sb). In this case there
3248                         * will never be another interrupt until there is
3249                         * another update of the status block, while there
3250                         * is still unhandled work.
3251                         */
3252                        rmb();
3253
3254                        if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3255                                if (napi_complete_done(napi, rx_work_done)) {
3256                                        /* Re-enable interrupts */
3257                                        DP(NETIF_MSG_RX_STATUS,
3258                                           "Update index to %d\n", fp->fp_hc_idx);
3259                                        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3260                                                     le16_to_cpu(fp->fp_hc_idx),
3261                                                     IGU_INT_ENABLE, 1);
3262                                }
3263                        } else {
3264                                rx_work_done = budget;
3265                        }
3266                }
3267        }
3268
3269        return rx_work_done;
3270}
3271
3272/* we split the first BD into a header BD and a data BD
3273 * to ease the pain of our fellow microcode engineers;
3274 * we use one mapping for both BDs
3275 */
3276static u16 bnx2x_tx_split(struct bnx2x *bp,
3277                          struct bnx2x_fp_txdata *txdata,
3278                          struct sw_tx_bd *tx_buf,
3279                          struct eth_tx_start_bd **tx_bd, u16 hlen,
3280                          u16 bd_prod)
3281{
3282        struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3283        struct eth_tx_bd *d_tx_bd;
3284        dma_addr_t mapping;
3285        int old_len = le16_to_cpu(h_tx_bd->nbytes);
3286
3287        /* first fix first BD */
3288        h_tx_bd->nbytes = cpu_to_le16(hlen);
3289
3290        DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3291           h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3292
3293        /* now get a new data BD
3294         * (after the pbd) and fill it */
3295        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3296        d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3297
3298        mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3299                           le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3300
3301        d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3302        d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3303        d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3304
3305        /* this marks the BD as one that has no individual mapping */
3306        tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3307
3308        DP(NETIF_MSG_TX_QUEUED,
3309           "TSO split data size is %d (%x:%x)\n",
3310           d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3311
3312        /* update tx_bd */
3313        *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3314
3315        return bd_prod;
3316}
3317
3318#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3319#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
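    /* bswab16/bswab32 swap byte order while keeping sparse happy about the
     * __le annotations, e.g. bswab16(0x1234) == (__force __le16)0x3412.
     * When @fix is non-zero, bnx2x_csum_fix() below subtracts (@fix > 0) or
     * adds (@fix < 0) the checksum of |@fix| bytes adjacent to @t_header and
     * refolds the sum; see the "HW bug" note at its call site in
     * bnx2x_set_pbd_csum().
     */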
3320static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3321{
3322        __sum16 tsum = (__force __sum16) csum;
3323
3324        if (fix > 0)
3325                tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3326                                  csum_partial(t_header - fix, fix, 0)));
3327
3328        else if (fix < 0)
3329                tsum = ~csum_fold(csum_add((__force __wsum) csum,
3330                                  csum_partial(t_header, -fix, 0)));
3331
3332        return bswab16(tsum);
3333}
3334
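    /* Classify an skb into XMIT_* flags (plain, IPv4/IPv6 checksum, TCP,
     * GSO and their encapsulated variants). The result drives how the
     * start/parse BDs are filled in bnx2x_start_xmit().
     */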
3335static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3336{
3337        u32 rc;
3338        __u8 prot = 0;
3339        __be16 protocol;
3340
3341        if (skb->ip_summed != CHECKSUM_PARTIAL)
3342                return XMIT_PLAIN;
3343
3344        protocol = vlan_get_protocol(skb);
3345        if (protocol == htons(ETH_P_IPV6)) {
3346                rc = XMIT_CSUM_V6;
3347                prot = ipv6_hdr(skb)->nexthdr;
3348        } else {
3349                rc = XMIT_CSUM_V4;
3350                prot = ip_hdr(skb)->protocol;
3351        }
3352
3353        if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3354                if (inner_ip_hdr(skb)->version == 6) {
3355                        rc |= XMIT_CSUM_ENC_V6;
3356                        if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3357                                rc |= XMIT_CSUM_TCP;
3358                } else {
3359                        rc |= XMIT_CSUM_ENC_V4;
3360                        if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3361                                rc |= XMIT_CSUM_TCP;
3362                }
3363        }
3364        if (prot == IPPROTO_TCP)
3365                rc |= XMIT_CSUM_TCP;
3366
3367        if (skb_is_gso(skb)) {
3368                if (skb_is_gso_v6(skb)) {
3369                        rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3370                        if (rc & XMIT_CSUM_ENC)
3371                                rc |= XMIT_GSO_ENC_V6;
3372                } else {
3373                        rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3374                        if (rc & XMIT_CSUM_ENC)
3375                                rc |= XMIT_GSO_ENC_V4;
3376                }
3377        }
3378
3379        return rc;
3380}
3381
3382/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 PBDs and the last BD) */
3383#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3384
3385/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3386#define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3387
3388#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3389/* check if the packet requires linearization (packet is too fragmented);
3390   no need to check fragmentation if page size > 8K (there will be no
3391   violation of FW restrictions) */
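    /* For GSO the check below slides a window of (MAX_FETCH_BD -
     * num_tso_win_sub) consecutive BDs across the linear data and the
     * fragments, and flags the skb for linearization if any window carries
     * less than one MSS of payload; presumably the FW needs a full MSS
     * inside every such window to build a segment.
     */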
3392static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3393                             u32 xmit_type)
3394{
3395        int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3396        int to_copy = 0, hlen = 0;
3397
3398        if (xmit_type & XMIT_GSO_ENC)
3399                num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3400
3401        if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3402                if (xmit_type & XMIT_GSO) {
3403                        unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3404                        int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3405                        /* Number of windows to check */
3406                        int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3407                        int wnd_idx = 0;
3408                        int frag_idx = 0;
3409                        u32 wnd_sum = 0;
3410
3411                        /* Headers length */
3412                        if (xmit_type & XMIT_GSO_ENC)
3413                                hlen = (int)(skb_inner_transport_header(skb) -
3414                                             skb->data) +
3415                                             inner_tcp_hdrlen(skb);
3416                        else
3417                                hlen = (int)(skb_transport_header(skb) -
3418                                             skb->data) + tcp_hdrlen(skb);
3419
3420                        /* Amount of data (w/o headers) on the linear part of the SKB */
3421                        first_bd_sz = skb_headlen(skb) - hlen;
3422
3423                        wnd_sum  = first_bd_sz;
3424
3425                        /* Calculate the first sum - it's special */
3426                        for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3427                                wnd_sum +=
3428                                        skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3429
3430                        /* If there was data in the linear part of the skb - check it */
3431                        if (first_bd_sz > 0) {
3432                                if (unlikely(wnd_sum < lso_mss)) {
3433                                        to_copy = 1;
3434                                        goto exit_lbl;
3435                                }
3436
3437                                wnd_sum -= first_bd_sz;
3438                        }
3439
3440                        /* Others are easier: run through the frag list and
3441                           check all windows */
3442                        for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3443                                wnd_sum +=
3444                          skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3445
3446                                if (unlikely(wnd_sum < lso_mss)) {
3447                                        to_copy = 1;
3448                                        break;
3449                                }
3450                                wnd_sum -=
3451                                        skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3452                        }
3453                } else {
3454                        /* in the non-LSO case, a too fragmented packet should
3455                           always be linearized */
3456                        to_copy = 1;
3457                }
3458        }
3459
3460exit_lbl:
3461        if (unlikely(to_copy))
3462                DP(NETIF_MSG_TX_QUEUED,
3463                   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3464                   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3465                   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3466
3467        return to_copy;
3468}
3469#endif
3470
3471/**
3472 * bnx2x_set_pbd_gso - update PBD in GSO case.
3473 *
3474 * @skb:        packet skb
3475 * @pbd:        parse BD
3476 * @xmit_type:  xmit flags
3477 */
3478static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3479                              struct eth_tx_parse_bd_e1x *pbd,
3480                              u32 xmit_type)
3481{
3482        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3483        pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3484        pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3485
3486        if (xmit_type & XMIT_GSO_V4) {
3487                pbd->ip_id = bswab16(ip_hdr(skb)->id);
3488                pbd->tcp_pseudo_csum =
3489                        bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3490                                                   ip_hdr(skb)->daddr,
3491                                                   0, IPPROTO_TCP, 0));
3492        } else {
3493                pbd->tcp_pseudo_csum =
3494                        bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3495                                                 &ipv6_hdr(skb)->daddr,
3496                                                 0, IPPROTO_TCP, 0));
3497        }
3498
3499        pbd->global_data |=
3500                cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3501}
3502
3503/**
3504 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3505 *
3506 * @bp:                 driver handle
3507 * @skb:                packet skb
3508 * @parsing_data:       data to be updated
3509 * @xmit_type:          xmit flags
3510 *
3511 * 57712/578xx related, when skb has encapsulation
3512 */
3513static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3514                                 u32 *parsing_data, u32 xmit_type)
3515{
3516        *parsing_data |=
3517                ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3518                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3519                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3520
3521        if (xmit_type & XMIT_CSUM_TCP) {
3522                *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3523                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3524                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3525
3526                return skb_inner_transport_header(skb) +
3527                        inner_tcp_hdrlen(skb) - skb->data;
3528        }
3529
3530        /* We support checksum offload for TCP and UDP only.
3531         * No need to pass the UDP header length - it's a constant.
3532         */
3533        return skb_inner_transport_header(skb) +
3534                sizeof(struct udphdr) - skb->data;
3535}
3536
3537/**
3538 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3539 *
3540 * @bp:                 driver handle
3541 * @skb:                packet skb
3542 * @parsing_data:       data to be updated
3543 * @xmit_type:          xmit flags
3544 *
3545 * 57712/578xx related
3546 */
3547static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3548                                u32 *parsing_data, u32 xmit_type)
3549{
3550        *parsing_data |=
3551                ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3552                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3553                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3554
3555        if (xmit_type & XMIT_CSUM_TCP) {
3556                *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3557                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3558                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3559
3560                return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3561        }
3562        /* We support checksum offload for TCP and UDP only.
3563         * No need to pass the UDP header length - it's a constant.
3564         */
3565        return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3566}
3567
3568/* set FW indication according to inner or outer protocols if tunneled */
3569static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3570                               struct eth_tx_start_bd *tx_start_bd,
3571                               u32 xmit_type)
3572{
3573        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3574
3575        if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3576                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3577
3578        if (!(xmit_type & XMIT_CSUM_TCP))
3579                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3580}
3581
3582/**
3583 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3584 *
3585 * @bp:         driver handle
3586 * @skb:        packet skb
3587 * @pbd:        parse BD to be updated
3588 * @xmit_type:  xmit flags
3589 */
3590static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3591                             struct eth_tx_parse_bd_e1x *pbd,
3592                             u32 xmit_type)
3593{
3594        u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3595
3596        /* for now NS flag is not used in Linux */
3597        pbd->global_data =
3598                cpu_to_le16(hlen |
3599                            ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3600                             ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3601
3602        pbd->ip_hlen_w = (skb_transport_header(skb) -
3603                        skb_network_header(skb)) >> 1;
3604
3605        hlen += pbd->ip_hlen_w;
3606
3607        /* We support checksum offload for TCP and UDP only */
3608        if (xmit_type & XMIT_CSUM_TCP)
3609                hlen += tcp_hdrlen(skb) / 2;
3610        else
3611                hlen += sizeof(struct udphdr) / 2;
3612
3613        pbd->total_hlen_w = cpu_to_le16(hlen);
3614        hlen = hlen * 2;
3615
3616        if (xmit_type & XMIT_CSUM_TCP) {
3617                pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3618
3619        } else {
3620                s8 fix = SKB_CS_OFF(skb); /* signed! */
3621
3622                DP(NETIF_MSG_TX_QUEUED,
3623                   "hlen %d  fix %d  csum before fix %x\n",
3624                   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3625
3626                /* HW bug: fixup the CSUM */
3627                pbd->tcp_pseudo_csum =
3628                        bnx2x_csum_fix(skb_transport_header(skb),
3629                                       SKB_CS(skb), fix);
3630
3631                DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3632                   pbd->tcp_pseudo_csum);
3633        }
3634
3635        return hlen;
3636}
3637
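    /* Fill the e2 parse BD tunnel data and the 2nd parse BD for encapsulated
     * (tunneled) GSO: header lengths are expressed in 16-bit words, the outer
     * IPv4 checksum is stored folded without the length/frag fields, and the
     * inner pseudo-header checksum is computed with a zero length (the usual
     * LSO seeding).
     */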
3638static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3639                                      struct eth_tx_parse_bd_e2 *pbd_e2,
3640                                      struct eth_tx_parse_2nd_bd *pbd2,
3641                                      u16 *global_data,
3642                                      u32 xmit_type)
3643{
3644        u16 hlen_w = 0;
3645        u8 outerip_off, outerip_len = 0;
3646
3647        /* from outer IP to transport */
3648        hlen_w = (skb_inner_transport_header(skb) -
3649                  skb_network_header(skb)) >> 1;
3650
3651        /* transport len */
3652        hlen_w += inner_tcp_hdrlen(skb) >> 1;
3653
3654        pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3655
3656        /* outer IP header info */
3657        if (xmit_type & XMIT_CSUM_V4) {
3658                struct iphdr *iph = ip_hdr(skb);
3659                u32 csum = (__force u32)(~iph->check) -
3660                           (__force u32)iph->tot_len -
3661                           (__force u32)iph->frag_off;
3662
3663                outerip_len = iph->ihl << 1;
3664
3665                pbd2->fw_ip_csum_wo_len_flags_frag =
3666                        bswab16(csum_fold((__force __wsum)csum));
3667        } else {
3668                pbd2->fw_ip_hdr_to_payload_w =
3669                        hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3670                pbd_e2->data.tunnel_data.flags |=
3671                        ETH_TUNNEL_DATA_IPV6_OUTER;
3672        }
3673
3674        pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3675
3676        pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3677
3678        /* inner IP header info */
3679        if (xmit_type & XMIT_CSUM_ENC_V4) {
3680                pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3681
3682                pbd_e2->data.tunnel_data.pseudo_csum =
3683                        bswab16(~csum_tcpudp_magic(
3684                                        inner_ip_hdr(skb)->saddr,
3685                                        inner_ip_hdr(skb)->daddr,
3686                                        0, IPPROTO_TCP, 0));
3687        } else {
3688                pbd_e2->data.tunnel_data.pseudo_csum =
3689                        bswab16(~csum_ipv6_magic(
3690                                        &inner_ipv6_hdr(skb)->saddr,
3691                                        &inner_ipv6_hdr(skb)->daddr,
3692                                        0, IPPROTO_TCP, 0));
3693        }
3694
3695        outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3696
3697        *global_data |=
3698                outerip_off |
3699                (outerip_len <<
3700                        ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3701                ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3702                        ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3703
3704        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3705                SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3706                pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3707        }
3708}
3709
3710static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3711                                         u32 xmit_type)
3712{
3713        struct ipv6hdr *ipv6;
3714
3715        if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3716                return;
3717
3718        if (xmit_type & XMIT_GSO_ENC_V6)
3719                ipv6 = inner_ipv6_hdr(skb);
3720        else /* XMIT_GSO_V6 */
3721                ipv6 = ipv6_hdr(skb);
3722
3723        if (ipv6->nexthdr == NEXTHDR_IPV6)
3724                *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3725}
3726
3727/* called with netif_tx_lock
3728 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3729 * netif_wake_queue()
3730 */
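    /* Each transmitted packet is described by a chain of BDs: a start BD, one
     * or two parse BDs (chip and encapsulation dependent), an extra data BD
     * when the TSO header is split from the linear data, and one data BD per
     * fragment. first_bd->nbd carries the total and the doorbell is rung only
     * after the whole chain has been written.
     */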
3731netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3732{
3733        struct bnx2x *bp = netdev_priv(dev);
3734
3735        struct netdev_queue *txq;
3736        struct bnx2x_fp_txdata *txdata;
3737        struct sw_tx_bd *tx_buf;
3738        struct eth_tx_start_bd *tx_start_bd, *first_bd;
3739        struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3740        struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3741        struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3742        struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3743        u32 pbd_e2_parsing_data = 0;
3744        u16 pkt_prod, bd_prod;
3745        int nbd, txq_index;
3746        dma_addr_t mapping;
3747        u32 xmit_type = bnx2x_xmit_type(bp, skb);
3748        int i;
3749        u8 hlen = 0;
3750        __le16 pkt_size = 0;
3751        struct ethhdr *eth;
3752        u8 mac_type = UNICAST_ADDRESS;
3753
3754#ifdef BNX2X_STOP_ON_ERROR
3755        if (unlikely(bp->panic))
3756                return NETDEV_TX_BUSY;
3757#endif
3758
3759        txq_index = skb_get_queue_mapping(skb);
3760        txq = netdev_get_tx_queue(dev, txq_index);
3761
3762        BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3763
3764        txdata = &bp->bnx2x_txq[txq_index];
3765
3766        /* enable this debug print to view the transmission queue being used
3767        DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3768           txq_index, fp_index, txdata_index); */
3769
3770        /* enable this debug print to view the transmission details
3771        DP(NETIF_MSG_TX_QUEUED,
3772           "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3773           txdata->cid, fp_index, txdata_index, txdata, fp); */
3774
3775        if (unlikely(bnx2x_tx_avail(bp, txdata) <
3776                        skb_shinfo(skb)->nr_frags +
3777                        BDS_PER_TX_PKT +
3778                        NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3779                /* Handle special storage cases separately */
3780                if (txdata->tx_ring_size == 0) {
3781                        struct bnx2x_eth_q_stats *q_stats =
3782                                bnx2x_fp_qstats(bp, txdata->parent_fp);
3783                        q_stats->driver_filtered_tx_pkt++;
3784                        dev_kfree_skb(skb);
3785                        return NETDEV_TX_OK;
3786                }
3787                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3788                netif_tx_stop_queue(txq);
3789                BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3790
3791                return NETDEV_TX_BUSY;
3792        }
3793
3794        DP(NETIF_MSG_TX_QUEUED,
3795           "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3796           txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3797           ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3798           skb->len);
3799
3800        eth = (struct ethhdr *)skb->data;
3801
3802        /* set flag according to packet type (UNICAST_ADDRESS is default) */
3803        if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3804                if (is_broadcast_ether_addr(eth->h_dest))
3805                        mac_type = BROADCAST_ADDRESS;
3806                else
3807                        mac_type = MULTICAST_ADDRESS;
3808        }
3809
3810#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3811        /* First, check if we need to linearize the skb (due to FW
3812           restrictions). No need to check fragmentation if page size > 8K
3813           (there will be no violation of FW restrictions) */
3814        if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3815                /* Statistics of linearization */
3816                bp->lin_cnt++;
3817                if (skb_linearize(skb) != 0) {
3818                        DP(NETIF_MSG_TX_QUEUED,
3819                           "SKB linearization failed - silently dropping this SKB\n");
3820                        dev_kfree_skb_any(skb);
3821                        return NETDEV_TX_OK;
3822                }
3823        }
3824#endif
3825        /* Map skb linear data for DMA */
3826        mapping = dma_map_single(&bp->pdev->dev, skb->data,
3827                                 skb_headlen(skb), DMA_TO_DEVICE);
3828        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3829                DP(NETIF_MSG_TX_QUEUED,
3830                   "SKB mapping failed - silently dropping this SKB\n");
3831                dev_kfree_skb_any(skb);
3832                return NETDEV_TX_OK;
3833        }
3834        /*
3835        Please read carefully. First we use one BD which we mark as start,
3836        then we have a parsing info BD (used for TSO or xsum),
3837        and only then we have the rest of the TSO BDs.
3838        (don't forget to mark the last one as last,
3839        and to unmap only AFTER you write to the BD ...)
3840        And above all, all pbd sizes are in words - NOT DWORDS!
3841        */
3842
3843        /* get current pkt produced now - advance it just before sending packet
3844         * since mapping of pages may fail and cause packet to be dropped
3845         */
3846        pkt_prod = txdata->tx_pkt_prod;
3847        bd_prod = TX_BD(txdata->tx_bd_prod);
3848
3849        /* get a tx_buf and first BD
3850         * tx_start_bd may be changed during SPLIT,
3851         * but first_bd will always stay first
3852         */
3853        tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3854        tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3855        first_bd = tx_start_bd;
3856
3857        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3858
3859        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3860                if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3861                        BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3862                } else if (bp->ptp_tx_skb) {
3863                        BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3864                } else {
3865                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3866                        /* schedule check for Tx timestamp */
3867                        bp->ptp_tx_skb = skb_get(skb);
3868                        bp->ptp_tx_start = jiffies;
3869                        schedule_work(&bp->ptp_task);
3870                }
3871        }
3872
3873        /* header nbd: indirectly zero other flags! */
3874        tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3875
3876        /* remember the first BD of the packet */
3877        tx_buf->first_bd = txdata->tx_bd_prod;
3878        tx_buf->skb = skb;
3879        tx_buf->flags = 0;
3880
3881        DP(NETIF_MSG_TX_QUEUED,
3882           "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3883           pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3884
3885        if (skb_vlan_tag_present(skb)) {
3886                tx_start_bd->vlan_or_ethertype =
3887                    cpu_to_le16(skb_vlan_tag_get(skb));
3888                tx_start_bd->bd_flags.as_bitfield |=
3889                    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3890        } else {
3891                /* when transmitting in a VF, the start BD must hold the
3892                 * ethertype for the FW to enforce it
3893                 */
3894                u16 vlan_tci = 0;
3895#ifndef BNX2X_STOP_ON_ERROR
3896                if (IS_VF(bp)) {
3897#endif
3898                        /* Still need to consider inband vlan for enforcement */
3899                        if (__vlan_get_tag(skb, &vlan_tci)) {
3900                                tx_start_bd->vlan_or_ethertype =
3901                                        cpu_to_le16(ntohs(eth->h_proto));
3902                        } else {
3903                                tx_start_bd->bd_flags.as_bitfield |=
3904                                        (X_ETH_INBAND_VLAN <<
3905                                         ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3906                                tx_start_bd->vlan_or_ethertype =
3907                                        cpu_to_le16(vlan_tci);
3908                        }
3909#ifndef BNX2X_STOP_ON_ERROR
3910                } else {
3911                        /* used by FW for packet accounting */
3912                        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3913                }
3914#endif
3915        }
3916
3917        nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3918
3919        /* turn on parsing and get a BD */
3920        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3921
3922        if (xmit_type & XMIT_CSUM)
3923                bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3924
3925        if (!CHIP_IS_E1x(bp)) {
3926                pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3927                memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3928
3929                if (xmit_type & XMIT_CSUM_ENC) {
3930                        u16 global_data = 0;
3931
3932                        /* Set PBD in enc checksum offload case */
3933                        hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3934                                                      &pbd_e2_parsing_data,
3935                                                      xmit_type);
3936
3937                        /* turn on 2nd parsing and get a BD */
3938                        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3939
3940                        pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3941
3942                        memset(pbd2, 0, sizeof(*pbd2));
3943
3944                        pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3945                                (skb_inner_network_header(skb) -
3946                                 skb->data) >> 1;
3947
3948                        if (xmit_type & XMIT_GSO_ENC)
3949                                bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3950                                                          &global_data,
3951                                                          xmit_type);
3952
3953                        pbd2->global_data = cpu_to_le16(global_data);
3954
3955                        /* add additional parse BD indication to start BD */
3956                        SET_FLAG(tx_start_bd->general_data,
3957                                 ETH_TX_START_BD_PARSE_NBDS, 1);
3958                        /* set encapsulation flag in start BD */
3959                        SET_FLAG(tx_start_bd->general_data,
3960                                 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3961
3962                        tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3963
3964                        nbd++;
3965                } else if (xmit_type & XMIT_CSUM) {
3966                        /* Set PBD in checksum offload case w/o encapsulation */
3967                        hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3968                                                     &pbd_e2_parsing_data,
3969                                                     xmit_type);
3970                }
3971
3972                bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3973                /* Add the macs to the parsing BD if this is a vf or if
3974                 * Tx Switching is enabled.
3975                 */
3976                if (IS_VF(bp)) {
3977                        /* override GRE parameters in BD */
3978                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3979                                              &pbd_e2->data.mac_addr.src_mid,
3980                                              &pbd_e2->data.mac_addr.src_lo,
3981                                              eth->h_source);
3982
3983                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3984                                              &pbd_e2->data.mac_addr.dst_mid,
3985                                              &pbd_e2->data.mac_addr.dst_lo,
3986                                              eth->h_dest);
3987                } else {
3988                        if (bp->flags & TX_SWITCHING)
3989                                bnx2x_set_fw_mac_addr(
3990                                                &pbd_e2->data.mac_addr.dst_hi,
3991                                                &pbd_e2->data.mac_addr.dst_mid,
3992                                                &pbd_e2->data.mac_addr.dst_lo,
3993                                                eth->h_dest);
3994#ifdef BNX2X_STOP_ON_ERROR
3995                        /* Enforce security is always set in Stop on Error -
3996                         * source mac should be present in the parsing BD
3997                         */
3998                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3999                                              &pbd_e2->data.mac_addr.src_mid,
4000                                              &pbd_e2->data.mac_addr.src_lo,
4001                                              eth->h_source);
4002#endif
4003                }
4004
4005                SET_FLAG(pbd_e2_parsing_data,
4006                         ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4007        } else {
4008                u16 global_data = 0;
4009                pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4010                memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4011                /* Set PBD in checksum offload case */
4012                if (xmit_type & XMIT_CSUM)
4013                        hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4014
4015                SET_FLAG(global_data,
4016                         ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4017                pbd_e1x->global_data |= cpu_to_le16(global_data);
4018        }
4019
4020        /* Setup the data pointer of the first BD of the packet */
4021        tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4022        tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4023        tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4024        pkt_size = tx_start_bd->nbytes;
4025
4026        DP(NETIF_MSG_TX_QUEUED,
4027           "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4028           tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4029           le16_to_cpu(tx_start_bd->nbytes),
4030           tx_start_bd->bd_flags.as_bitfield,
4031           le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4032
4033        if (xmit_type & XMIT_GSO) {
4034
4035                DP(NETIF_MSG_TX_QUEUED,
4036                   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4037                   skb->len, hlen, skb_headlen(skb),
4038                   skb_shinfo(skb)->gso_size);
4039
4040                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4041
4042                if (unlikely(skb_headlen(skb) > hlen)) {
4043                        nbd++;
4044                        bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4045                                                 &tx_start_bd, hlen,
4046                                                 bd_prod);
4047                }
4048                if (!CHIP_IS_E1x(bp))
4049                        pbd_e2_parsing_data |=
4050                                (skb_shinfo(skb)->gso_size <<
4051                                 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4052                                 ETH_TX_PARSE_BD_E2_LSO_MSS;
4053                else
4054                        bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4055        }
4056
4057        /* Set the PBD's parsing_data field if not zero
4058         * (for the chips newer than 57711).
4059         */
4060        if (pbd_e2_parsing_data)
4061                pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4062
4063        tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4064
4065        /* Handle fragmented skb */
4066        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4067                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4068
4069                mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4070                                           skb_frag_size(frag), DMA_TO_DEVICE);
4071                if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4072                        unsigned int pkts_compl = 0, bytes_compl = 0;
4073
4074                        DP(NETIF_MSG_TX_QUEUED,
4075                           "Unable to map page - dropping packet...\n");
4076
4077                        /* we need to unmap all buffers already mapped
4078                         * for this SKB;
4079                         * first_bd->nbd needs to be properly updated
4080                         * before the call to bnx2x_free_tx_pkt
4081                         */
4082                        first_bd->nbd = cpu_to_le16(nbd);
4083                        bnx2x_free_tx_pkt(bp, txdata,
4084                                          TX_BD(txdata->tx_pkt_prod),
4085                                          &pkts_compl, &bytes_compl);
4086                        return NETDEV_TX_OK;
4087                }
4088
4089                bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4090                tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4091                if (total_pkt_bd == NULL)
4092                        total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4093
4094                tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4095                tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4096                tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4097                le16_add_cpu(&pkt_size, skb_frag_size(frag));
4098                nbd++;
4099
4100                DP(NETIF_MSG_TX_QUEUED,
4101                   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4102                   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4103                   le16_to_cpu(tx_data_bd->nbytes));
4104        }
4105
4106        DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4107
4108        /* update with actual num BDs */
4109        first_bd->nbd = cpu_to_le16(nbd);
4110
4111        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4112
4113        /* now send a tx doorbell, counting the next BD
4114         * if the packet contains or ends with it
4115         */
4116        if (TX_BD_POFF(bd_prod) < nbd)
4117                nbd++;
4118
4119        /* total_pkt_bytes should be set on the first data BD if
4120         * it's not an LSO packet and there is more than one
4121         * data BD. In this case pkt_size is limited by an MTU value.
4122         * However we prefer to set it for an LSO packet (while we don't
4123         * have to) in order to save some CPU cycles in the non-LSO
4124         * case, where we care about them much more.
4125         */
4126        if (total_pkt_bd != NULL)
4127                total_pkt_bd->total_pkt_bytes = pkt_size;
4128
4129        if (pbd_e1x)
4130                DP(NETIF_MSG_TX_QUEUED,
4131                   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4132                   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4133                   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4134                   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4135                    le16_to_cpu(pbd_e1x->total_hlen_w));
4136        if (pbd_e2)
4137                DP(NETIF_MSG_TX_QUEUED,
4138                   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4139                   pbd_e2,
4140                   pbd_e2->data.mac_addr.dst_hi,
4141                   pbd_e2->data.mac_addr.dst_mid,
4142                   pbd_e2->data.mac_addr.dst_lo,
4143                   pbd_e2->data.mac_addr.src_hi,
4144                   pbd_e2->data.mac_addr.src_mid,
4145                   pbd_e2->data.mac_addr.src_lo,
4146                   pbd_e2->parsing_data);
4147        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4148
4149        netdev_tx_sent_queue(txq, skb->len);
4150
4151        skb_tx_timestamp(skb);
4152
4153        txdata->tx_pkt_prod++;
4154        /*
4155         * Make sure that the BD data is updated before updating the producer
4156         * since FW might read the BD right after the producer is updated.
4157         * This is only applicable for weak-ordered memory model archs such
4158         * as IA-64. The following barrier is also mandatory since the FW
4159         * assumes packets must have BDs.
4160         */
4161        wmb();
4162
4163        txdata->tx_db.data.prod += nbd;
4164        /* make sure descriptor update is observed by HW */
4165        wmb();
4166
4167        DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4168
4169        mmiowb();
4170
4171        txdata->tx_bd_prod += nbd;
4172
4173        if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4174                netif_tx_stop_queue(txq);
4175
4176                /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4177                 * ordering of set_bit() in netif_tx_stop_queue() and read of
4178                 * txdata->tx_bd_cons */
4179                smp_mb();
4180
4181                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4182                if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4183                        netif_tx_wake_queue(txq);
4184        }
4185        txdata->tx_pkt++;
4186
4187        return NETDEV_TX_OK;
4188}
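
/* The "paired memory barrier" mentioned just before the netif_tx_stop_queue()
 * path above is the usual producer/consumer stop-wake protocol. A rough,
 * simplified sketch of the completion side in bnx2x_tx_int() (not verbatim
 * driver code) is:
 *
 *      txdata->tx_pkt_cons = sw_cons;
 *      txdata->tx_bd_cons = bd_cons;
 *      smp_mb();           // pairs with the smp_mb() in bnx2x_start_xmit()
 *      if (unlikely(netif_tx_queue_stopped(txq)) &&
 *          bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
 *              netif_tx_wake_queue(txq);
 *
 * Either the transmit side observes the freed BDs and never stops the queue,
 * or the completion side observes the stopped queue and wakes it; without the
 * paired barriers both checks could miss each other and the queue could stall.
 */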
4189
4190void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4191{
4192        int mfw_vn = BP_FW_MB_IDX(bp);
4193        u32 tmp;
4194
4195        /* If the shmem shouldn't affect configuration, use an identity map */
4196        if (!IS_MF_BD(bp)) {
4197                int i;
4198
4199                for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4200                        c2s_map[i] = i;
4201                *c2s_default = 0;
4202
4203                return;
4204        }
4205
4206        tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4207        tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4208        c2s_map[0] = tmp & 0xff;
4209        c2s_map[1] = (tmp >> 8) & 0xff;
4210        c2s_map[2] = (tmp >> 16) & 0xff;
4211        c2s_map[3] = (tmp >> 24) & 0xff;
4212
4213        tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4214        tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4215        c2s_map[4] = tmp & 0xff;
4216        c2s_map[5] = (tmp >> 8) & 0xff;
4217        c2s_map[6] = (tmp >> 16) & 0xff;
4218        c2s_map[7] = (tmp >> 24) & 0xff;
4219
4220        tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4221        tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4222        *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4223}
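
/* Unpacking example: the three shmem words above are consumed one byte at a
 * time, lowest byte first. If, after the endianness fixup, c2s_pcp_map_lower
 * reads 0x03020100 and c2s_pcp_map_upper reads 0x07060504, the result is the
 * identity mapping c2s_map[0..7] = {0, 1, 2, 3, 4, 5, 6, 7}; c2s_default is
 * simply the byte of c2s_pcp_map_default selected by this function's mailbox
 * index (mfw_vn).
 */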
4224
4225/**
4226 * bnx2x_setup_tc - routine to configure net_device for multi tc
4227 *
4228 * @dev: net device to configure
4229 * @num_tc: number of traffic classes to enable
4230 *
4231 * callback connected to the ndo_setup_tc function pointer
4232 */
4233int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4234{
4235        struct bnx2x *bp = netdev_priv(dev);
4236        u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4237        int cos, prio, count, offset;
4238
4239        /* setup tc must be called under rtnl lock */
4240        ASSERT_RTNL();
4241
4242        /* no traffic classes requested. Aborting */
4243        if (!num_tc) {
4244                netdev_reset_tc(dev);
4245                return 0;
4246        }
4247
4248        /* requested to support too many traffic classes */
4249        if (num_tc > bp->max_cos) {
4250                BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4251                          num_tc, bp->max_cos);
4252                return -EINVAL;
4253        }
4254
4255        /* declare amount of supported traffic classes */
4256        if (netdev_set_num_tc(dev, num_tc)) {
4257                BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4258                return -EINVAL;
4259        }
4260
4261        bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4262
4263        /* configure priority to traffic class mapping */
4264        for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4265                int outer_prio = c2s_map[prio];
4266
4267                netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4268                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4269                   "mapping priority %d to tc %d\n",
4270                   outer_prio, bp->prio_to_cos[outer_prio]);
4271        }
4272
4273        /* Use this configuration to differentiate tc0 from other COSes
4274           This can be used for ets or pfc, and save the effort of setting
4275           up a multi-class queue disc or negotiating DCBX with a switch
4276        netdev_set_prio_tc_map(dev, 0, 0);
4277        DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4278        for (prio = 1; prio < 16; prio++) {
4279                netdev_set_prio_tc_map(dev, prio, 1);
4280                DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4281        } */
4282
4283        /* configure traffic class to transmission queue mapping */
4284        for (cos = 0; cos < bp->max_cos; cos++) {
4285                count = BNX2X_NUM_ETH_QUEUES(bp);
4286                offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4287                netdev_set_tc_queue(dev, cos, count, offset);
4288                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4289                   "mapping tc %d to offset %d count %d\n",
4290                   cos, offset, count);
4291        }
4292
4293        return 0;
4294}
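
/* For illustration, with num_tc = 2, an identity c2s mapping and (a made-up
 * count of) 4 ethernet queues, the calls above boil down to roughly:
 *
 *      netdev_set_num_tc(dev, 2);
 *      netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);  // per prio
 *      netdev_set_tc_queue(dev, 0, 4, 0);      // tc0 -> txq 0..3
 *      netdev_set_tc_queue(dev, 1, 4, 4);      // tc1 -> txq 4..7
 *
 * i.e. each COS/traffic class is given its own contiguous block of transmit
 * queues, one block per cos, offset by BNX2X_NUM_NON_CNIC_QUEUES(bp).
 */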
4295
4296int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4297                     void *type_data)
4298{
4299        struct tc_mqprio_qopt *mqprio = type_data;
4300
4301        if (type != TC_SETUP_QDISC_MQPRIO)
4302                return -EOPNOTSUPP;
4303
4304        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4305
4306        return bnx2x_setup_tc(dev, mqprio->num_tc);
4307}
4308
4309/* called with rtnl_lock */
4310int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4311{
4312        struct sockaddr *addr = p;
4313        struct bnx2x *bp = netdev_priv(dev);
4314        int rc = 0;
4315
4316        if (!is_valid_ether_addr(addr->sa_data)) {
4317                BNX2X_ERR("Requested MAC address is not valid\n");
4318                return -EINVAL;
4319        }
4320
4321        if (IS_MF_STORAGE_ONLY(bp)) {
4322                BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4323                return -EINVAL;
4324        }
4325
4326        if (netif_running(dev))  {
4327                rc = bnx2x_set_eth_mac(bp, false);
4328                if (rc)
4329                        return rc;
4330        }
4331
4332        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4333
4334        if (netif_running(dev))
4335                rc = bnx2x_set_eth_mac(bp, true);
4336
4337        if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4338                SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4339
4340        return rc;
4341}
4342
4343static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4344{
4345        union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4346        struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4347        u8 cos;
4348
4349        /* Common */
4350
4351        if (IS_FCOE_IDX(fp_index)) {
4352                memset(sb, 0, sizeof(union host_hc_status_block));
4353                fp->status_blk_mapping = 0;
4354        } else {
4355                /* status blocks */
4356                if (!CHIP_IS_E1x(bp))
4357                        BNX2X_PCI_FREE(sb->e2_sb,
4358                                       bnx2x_fp(bp, fp_index,
4359                                                status_blk_mapping),
4360                                       sizeof(struct host_hc_status_block_e2));
4361                else
4362                        BNX2X_PCI_FREE(sb->e1x_sb,
4363                                       bnx2x_fp(bp, fp_index,
4364                                                status_blk_mapping),
4365                                       sizeof(struct host_hc_status_block_e1x));
4366        }
4367
4368        /* Rx */
4369        if (!skip_rx_queue(bp, fp_index)) {
4370                bnx2x_free_rx_bds(fp);
4371
4372                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4373                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4374                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4375                               bnx2x_fp(bp, fp_index, rx_desc_mapping),
4376                               sizeof(struct eth_rx_bd) * NUM_RX_BD);
4377
4378                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4379                               bnx2x_fp(bp, fp_index, rx_comp_mapping),
4380                               sizeof(struct eth_fast_path_rx_cqe) *
4381                               NUM_RCQ_BD);
4382
4383                /* SGE ring */
4384                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4385                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4386                               bnx2x_fp(bp, fp_index, rx_sge_mapping),
4387                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4388        }
4389
4390        /* Tx */
4391        if (!skip_tx_queue(bp, fp_index)) {
4392                /* fastpath tx rings: tx_buf tx_desc */
4393                for_each_cos_in_tx_queue(fp, cos) {
4394                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4395
4396                        DP(NETIF_MSG_IFDOWN,
4397                           "freeing tx memory of fp %d cos %d cid %d\n",
4398                           fp_index, cos, txdata->cid);
4399
4400                        BNX2X_FREE(txdata->tx_buf_ring);
4401                        BNX2X_PCI_FREE(txdata->tx_desc_ring,
4402                                txdata->tx_desc_mapping,
4403                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4404                }
4405        }
4406        /* end of fastpath */
4407}
4408
4409static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4410{
4411        int i;
4412        for_each_cnic_queue(bp, i)
4413                bnx2x_free_fp_mem_at(bp, i);
4414}
4415
4416void bnx2x_free_fp_mem(struct bnx2x *bp)
4417{
4418        int i;
4419        for_each_eth_queue(bp, i)
4420                bnx2x_free_fp_mem_at(bp, i);
4421}
4422
4423static void set_sb_shortcuts(struct bnx2x *bp, int index)
4424{
4425        union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4426        if (!CHIP_IS_E1x(bp)) {
4427                bnx2x_fp(bp, index, sb_index_values) =
4428                        (__le16 *)status_blk.e2_sb->sb.index_values;
4429                bnx2x_fp(bp, index, sb_running_index) =
4430                        (__le16 *)status_blk.e2_sb->sb.running_index;
4431        } else {
4432                bnx2x_fp(bp, index, sb_index_values) =
4433                        (__le16 *)status_blk.e1x_sb->sb.index_values;
4434                bnx2x_fp(bp, index, sb_running_index) =
4435                        (__le16 *)status_blk.e1x_sb->sb.running_index;
4436        }
4437}
4438
4439/* Returns the number of actually allocated BDs */
4440static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4441                              int rx_ring_size)
4442{
4443        struct bnx2x *bp = fp->bp;
4444        u16 ring_prod, cqe_ring_prod;
4445        int i, failure_cnt = 0;
4446
4447        fp->rx_comp_cons = 0;
4448        cqe_ring_prod = ring_prod = 0;
4449
4450        /* This routine is called only during fp init so
4451         * fp->eth_q_stats.rx_skb_alloc_failed = 0
4452         */
4453        for (i = 0; i < rx_ring_size; i++) {
4454                if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4455                        failure_cnt++;
4456                        continue;
4457                }
4458                ring_prod = NEXT_RX_IDX(ring_prod);
4459                cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4460                WARN_ON(ring_prod <= (i - failure_cnt));
4461        }
4462
4463        if (failure_cnt)
4464                BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4465                          i - failure_cnt, fp->index);
4466
4467        fp->rx_bd_prod = ring_prod;
4468        /* Limit the CQE producer by the CQE ring size */
4469        fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4470                               cqe_ring_prod);
4471
4472        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4473
4474        return i - failure_cnt;
4475}
4476
4477static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4478{
4479        int i;
4480
4481        for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4482                struct eth_rx_cqe_next_page *nextpg;
4483
4484                nextpg = (struct eth_rx_cqe_next_page *)
4485                        &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4486                nextpg->addr_hi =
4487                        cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4488                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4489                nextpg->addr_lo =
4490                        cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4491                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4492        }
4493}
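
/* For illustration: the loop above turns the CQ pages into a ring by reusing
 * the last CQE slot of each page as a "next page" pointer. With, say, four
 * pages (a made-up count; the real one is NUM_RCQ_RINGS) the chain becomes
 * page0 -> page1 -> page2 -> page3 -> page0, the final wrap back to page0
 * coming from the (i % NUM_RCQ_RINGS) term.
 */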
4494
4495static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4496{
4497        union host_hc_status_block *sb;
4498        struct bnx2x_fastpath *fp = &bp->fp[index];
4499        int ring_size = 0;
4500        u8 cos;
4501        int rx_ring_size = 0;
4502
4503        if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4504                rx_ring_size = MIN_RX_SIZE_NONTPA;
4505                bp->rx_ring_size = rx_ring_size;
4506        } else if (!bp->rx_ring_size) {
4507                rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4508
4509                if (CHIP_IS_E3(bp)) {
4510                        u32 cfg = SHMEM_RD(bp,
4511                                           dev_info.port_hw_config[BP_PORT(bp)].
4512                                           default_cfg);
4513
4514                        /* Decrease ring size for 1G functions */
4515                        if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4516                            PORT_HW_CFG_NET_SERDES_IF_SGMII)
4517                                rx_ring_size /= 10;
4518                }
4519
4520                /* allocate at least number of buffers required by FW */
4521                rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4522                                     MIN_RX_SIZE_TPA, rx_ring_size);
4523
4524                bp->rx_ring_size = rx_ring_size;
4525        } else /* if rx_ring_size specified - use it */
4526                rx_ring_size = bp->rx_ring_size;
4527
4528        DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
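
        /* Worked example of the sizing above (numbers are illustrative):
         * with 8 Rx queues the ring starts at MAX_RX_AVAIL / 8, is cut to a
         * tenth of that on an E3 SGMII (1G) port, and is then raised back to
         * at least MIN_RX_SIZE_TPA (or MIN_RX_SIZE_NONTPA when TPA is
         * disabled) so the FW always has the minimum number of buffers it
         * requires. An explicitly configured bp->rx_ring_size bypasses the
         * calculation entirely.
         */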
4529
4530        /* Common */
4531        sb = &bnx2x_fp(bp, index, status_blk);
4532
4533        if (!IS_FCOE_IDX(index)) {
4534                /* status blocks */
4535                if (!CHIP_IS_E1x(bp)) {
4536                        sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4537                                                    sizeof(struct host_hc_status_block_e2));
4538                        if (!sb->e2_sb)
4539                                goto alloc_mem_err;
4540                } else {
4541                        sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4542                                                     sizeof(struct host_hc_status_block_e1x));
4543                        if (!sb->e1x_sb)
4544                                goto alloc_mem_err;
4545                }
4546        }
4547
4548        /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4549         * set shortcuts for it.
4550         */
4551        if (!IS_FCOE_IDX(index))
4552                set_sb_shortcuts(bp, index);
4553
4554        /* Tx */
4555        if (!skip_tx_queue(bp, index)) {
4556                /* fastpath tx rings: tx_buf tx_desc */
4557                for_each_cos_in_tx_queue(fp, cos) {
4558                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4559
4560                        DP(NETIF_MSG_IFUP,
4561                           "allocating tx memory of fp %d cos %d\n",
4562                           index, cos);
4563
4564                        txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4565                                                      sizeof(struct sw_tx_bd),
4566                                                      GFP_KERNEL);
4567                        if (!txdata->tx_buf_ring)
4568                                goto alloc_mem_err;
4569                        txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4570                                                               sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4571                        if (!txdata->tx_desc_ring)
4572                                goto alloc_mem_err;
4573                }
4574        }
4575
4576        /* Rx */
4577        if (!skip_rx_queue(bp, index)) {
4578                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4579                bnx2x_fp(bp, index, rx_buf_ring) =
4580                        kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4581                if (!bnx2x_fp(bp, index, rx_buf_ring))
4582                        goto alloc_mem_err;
4583                bnx2x_fp(bp, index, rx_desc_ring) =
4584                        BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4585                                        sizeof(struct eth_rx_bd) * NUM_RX_BD);
4586                if (!bnx2x_fp(bp, index, rx_desc_ring))
4587                        goto alloc_mem_err;
4588
4589                /* Seed all CQEs by 1s */
4590                bnx2x_fp(bp, index, rx_comp_ring) =
4591                        BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4592                                         sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4593                if (!bnx2x_fp(bp, index, rx_comp_ring))
4594                        goto alloc_mem_err;
4595
4596                /* SGE ring */
4597                bnx2x_fp(bp, index, rx_page_ring) =
4598                        kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4599                                GFP_KERNEL);
4600                if (!bnx2x_fp(bp, index, rx_page_ring))
4601                        goto alloc_mem_err;
4602                bnx2x_fp(bp, index, rx_sge_ring) =
4603                        BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4604                                        BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4605                if (!bnx2x_fp(bp, index, rx_sge_ring))
4606                        goto alloc_mem_err;
4607                /* RX BD ring */
4608                bnx2x_set_next_page_rx_bd(fp);
4609
4610                /* CQ ring */
4611                bnx2x_set_next_page_rx_cq(fp);
4612
4613                /* BDs */
4614                ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4615                if (ring_size < rx_ring_size)
4616                        goto alloc_mem_err;
4617        }
4618
4619        return 0;
4620
4621/* handles low memory cases */
4622alloc_mem_err:
4623        BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4624                                                index, ring_size);
4625        /* FW will drop all packets if the queue is not big enough.
4626         * In these cases we disable the queue.
4627         * Min size is different for OOO, TPA and non-TPA queues.
4628         */
4629        if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4630                                MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4631                        /* release memory allocated for this queue */
4632                        bnx2x_free_fp_mem_at(bp, index);
4633                        return -ENOMEM;
4634        }
4635        return 0;
4636}
4637
4638static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4639{
4640        if (!NO_FCOE(bp))
4641                /* FCoE */
4642                if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4643                        /* we will fail load process instead of mark
4644                         * NO_FCOE_FLAG
4645                         */
4646                        return -ENOMEM;
4647
4648        return 0;
4649}
4650
4651static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4652{
4653        int i;
4654
4655        /* 1. Allocate FP for leading - fatal if error
4656         * 2. Allocate RSS - fix number of queues if error
4657         */
4658
4659        /* leading */
4660        if (bnx2x_alloc_fp_mem_at(bp, 0))
4661                return -ENOMEM;
4662
4663        /* RSS */
4664        for_each_nondefault_eth_queue(bp, i)
4665                if (bnx2x_alloc_fp_mem_at(bp, i))
4666                        break;
4667
4668        /* handle memory failures */
4669        if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4670                int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4671
4672                WARN_ON(delta < 0);
4673                bnx2x_shrink_eth_fp(bp, delta);
4674                if (CNIC_SUPPORT(bp))
4675                        /* move non-eth FPs next to the last eth FP;
4676                         * must be done in that order:
4677                         * FCOE_IDX < FWD_IDX < OOO_IDX
4678                         */
4679
4680                        /* move FCoE fp even if NO_FCOE_FLAG is on */
4681                        bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4682                bp->num_ethernet_queues -= delta;
4683                bp->num_queues = bp->num_ethernet_queues +
4684                                 bp->num_cnic_queues;
4685                BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4686                          bp->num_queues + delta, bp->num_queues);
4687        }
4688
4689        return 0;
4690}
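
/* Shrink example (made-up numbers): if 8 RSS queues were requested but
 * allocation first failed at index 5, then delta = 3, the ethernet fastpaths
 * are trimmed to 5, and, when CNIC is supported, the FCoE fastpath is moved
 * from FCOE_IDX down by 3 slots so the non-ethernet fastpaths stay contiguous
 * with the surviving ethernet ones. num_queues is then reduced accordingly.
 */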
4691
4692void bnx2x_free_mem_bp(struct bnx2x *bp)
4693{
4694        int i;
4695
4696        for (i = 0; i < bp->fp_array_size; i++)
4697                kfree(bp->fp[i].tpa_info);
4698        kfree(bp->fp);
4699        kfree(bp->sp_objs);
4700        kfree(bp->fp_stats);
4701        kfree(bp->bnx2x_txq);
4702        kfree(bp->msix_table);
4703        kfree(bp->ilt);
4704}
4705
4706int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4707{
4708        struct bnx2x_fastpath *fp;
4709        struct msix_entry *tbl;
4710        struct bnx2x_ilt *ilt;
4711        int msix_table_size = 0;
4712        int fp_array_size, txq_array_size;
4713        int i;
4714
4715        /*
4716         * The biggest MSI-X table we might need is as a maximum number of fast
4717         * path IGU SBs plus default SB (for PF only).
4718         */
4719        msix_table_size = bp->igu_sb_cnt;
4720        if (IS_PF(bp))
4721                msix_table_size++;
4722        BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4723
4724        /* fp array: RSS plus CNIC related L2 queues */
4725        fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4726        bp->fp_array_size = fp_array_size;
4727        BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4728
4729        fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4730        if (!fp)
4731                goto alloc_err;
4732        for (i = 0; i < bp->fp_array_size; i++) {
4733                fp[i].tpa_info =
4734                        kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4735                                sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4736                if (!(fp[i].tpa_info))
4737                        goto alloc_err;
4738        }
4739
4740        bp->fp = fp;
4741
4742        /* allocate sp objs */
4743        bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4744                              GFP_KERNEL);
4745        if (!bp->sp_objs)
4746                goto alloc_err;
4747
4748        /* allocate fp_stats */
4749        bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4750                               GFP_KERNEL);
4751        if (!bp->fp_stats)
4752                goto alloc_err;
4753
4754        /* Allocate memory for the transmission queues array */
4755        txq_array_size =
4756                BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4757        BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4758
4759        bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4760                                GFP_KERNEL);
4761        if (!bp->bnx2x_txq)
4762                goto alloc_err;
4763
4764        /* msix table */
4765        tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4766        if (!tbl)
4767                goto alloc_err;
4768        bp->msix_table = tbl;
4769
4770        /* ilt */
4771        ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4772        if (!ilt)
4773                goto alloc_err;
4774        bp->ilt = ilt;
4775
4776        return 0;
4777alloc_err:
4778        bnx2x_free_mem_bp(bp);
4779        return -ENOMEM;
4780}
4781
4782int bnx2x_reload_if_running(struct net_device *dev)
4783{
4784        struct bnx2x *bp = netdev_priv(dev);
4785
4786        if (unlikely(!netif_running(dev)))
4787                return 0;
4788
4789        bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4790        return bnx2x_nic_load(bp, LOAD_NORMAL);
4791}
4792
4793int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4794{
4795        u32 sel_phy_idx = 0;
4796        if (bp->link_params.num_phys <= 1)
4797                return INT_PHY;
4798
4799        if (bp->link_vars.link_up) {
4800                sel_phy_idx = EXT_PHY1;
4801                /* In case link is SERDES, check if the EXT_PHY2 is the one */
4802                if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4803                    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4804                        sel_phy_idx = EXT_PHY2;
4805        } else {
4806
4807                switch (bnx2x_phy_selection(&bp->link_params)) {
4808                case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4809                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4810                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4811                       sel_phy_idx = EXT_PHY1;
4812                       break;
4813                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4814                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4815                       sel_phy_idx = EXT_PHY2;
4816                       break;
4817                }
4818        }
4819
4820        return sel_phy_idx;
4821}
4822int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4823{
4824        u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4825        /*
4826         * The selected active PHY is always reported after swapping (when PHY
4827         * swapping is enabled), so the configuration index needs to be swapped
4828         * back accordingly.
4829         */
4830
4831        if (bp->link_params.multi_phy_config &
4832            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4833                if (sel_phy_idx == EXT_PHY1)
4834                        sel_phy_idx = EXT_PHY2;
4835                else if (sel_phy_idx == EXT_PHY2)
4836                        sel_phy_idx = EXT_PHY1;
4837        }
4838        return LINK_CONFIG_IDX(sel_phy_idx);
4839}
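
/* The net effect of the swap above, for illustration:
 *
 *      active PHY      PHY swapping    configuration index used
 *      EXT_PHY1        disabled        LINK_CONFIG_IDX(EXT_PHY1)
 *      EXT_PHY2        disabled        LINK_CONFIG_IDX(EXT_PHY2)
 *      EXT_PHY1        enabled         LINK_CONFIG_IDX(EXT_PHY2)
 *      EXT_PHY2        enabled         LINK_CONFIG_IDX(EXT_PHY1)
 *
 * INT_PHY is never swapped and always maps to its own index.
 */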
4840
4841#ifdef NETDEV_FCOE_WWNN
4842int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4843{
4844        struct bnx2x *bp = netdev_priv(dev);
4845        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4846
4847        switch (type) {
4848        case NETDEV_FCOE_WWNN:
4849                *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4850                                cp->fcoe_wwn_node_name_lo);
4851                break;
4852        case NETDEV_FCOE_WWPN:
4853                *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4854                                cp->fcoe_wwn_port_name_lo);
4855                break;
4856        default:
4857                BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4858                return -EINVAL;
4859        }
4860
4861        return 0;
4862}
4863#endif
4864
4865/* called with rtnl_lock */
4866int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4867{
4868        struct bnx2x *bp = netdev_priv(dev);
4869
4870        if (pci_num_vf(bp->pdev)) {
4871                DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4872                return -EPERM;
4873        }
4874
4875        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4876                BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4877                return -EAGAIN;
4878        }
4879
4880        /* This does not race with packet allocation
4881         * because the actual alloc size is
4882         * only updated as part of load
4883         */
4884        dev->mtu = new_mtu;
4885
4886        if (!bnx2x_mtu_allows_gro(new_mtu))
4887                dev->features &= ~NETIF_F_GRO_HW;
4888
4889        if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4890                SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4891
4892        return bnx2x_reload_if_running(dev);
4893}
4894
4895netdev_features_t bnx2x_fix_features(struct net_device *dev,
4896                                     netdev_features_t features)
4897{
4898        struct bnx2x *bp = netdev_priv(dev);
4899
4900        if (pci_num_vf(bp->pdev)) {
4901                netdev_features_t changed = dev->features ^ features;
4902
4903                /* Revert the requested changes in features if they
4904                 * would require internal reload of PF in bnx2x_set_features().
4905                 */
4906                if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4907                        features &= ~NETIF_F_RXCSUM;
4908                        features |= dev->features & NETIF_F_RXCSUM;
4909                }
4910
4911                if (changed & NETIF_F_LOOPBACK) {
4912                        features &= ~NETIF_F_LOOPBACK;
4913                        features |= dev->features & NETIF_F_LOOPBACK;
4914                }
4915        }
4916
4917        /* TPA requires Rx CSUM offloading */
4918        if (!(features & NETIF_F_RXCSUM))
4919                features &= ~NETIF_F_LRO;
4920
4921        if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4922                features &= ~NETIF_F_GRO_HW;
4923        if (features & NETIF_F_GRO_HW)
4924                features &= ~NETIF_F_LRO;
4925
4926        return features;
4927}
4928
4929int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4930{
4931        struct bnx2x *bp = netdev_priv(dev);
4932        netdev_features_t changes = features ^ dev->features;
4933        bool bnx2x_reload = false;
4934        int rc;
4935
4936        /* VFs or non SRIOV PFs should be able to change loopback feature */
4937        if (!pci_num_vf(bp->pdev)) {
4938                if (features & NETIF_F_LOOPBACK) {
4939                        if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4940                                bp->link_params.loopback_mode = LOOPBACK_BMAC;
4941                                bnx2x_reload = true;
4942                        }
4943                } else {
4944                        if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4945                                bp->link_params.loopback_mode = LOOPBACK_NONE;
4946                                bnx2x_reload = true;
4947                        }
4948                }
4949        }
4950
4951        /* Don't care about GRO changes */
4952        changes &= ~NETIF_F_GRO;
4953
4954        if (changes)
4955                bnx2x_reload = true;
4956
4957        if (bnx2x_reload) {
4958                if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4959                        dev->features = features;
4960                        rc = bnx2x_reload_if_running(dev);
4961                        return rc ? rc : 1;
4962                }
4963                /* else: bnx2x_nic_load() will be called at end of recovery */
4964        }
4965
4966        return 0;
4967}
4968
4969void bnx2x_tx_timeout(struct net_device *dev)
4970{
4971        struct bnx2x *bp = netdev_priv(dev);
4972
4973        /* We want the information of the dump logged,
4974         * but calling bnx2x_panic() would kill all chances of recovery.
4975         */
4976        if (!bp->panic)
4977#ifndef BNX2X_STOP_ON_ERROR
4978                bnx2x_panic_dump(bp, false);
4979#else
4980                bnx2x_panic();
4981#endif
4982
4983        /* This allows the netif to be shutdown gracefully before resetting */
4984        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4985}
4986
4987int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4988{
4989        struct net_device *dev = pci_get_drvdata(pdev);
4990        struct bnx2x *bp;
4991
4992        if (!dev) {
4993                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4994                return -ENODEV;
4995        }
4996        bp = netdev_priv(dev);
4997
4998        rtnl_lock();
4999
5000        pci_save_state(pdev);
5001
5002        if (!netif_running(dev)) {
5003                rtnl_unlock();
5004                return 0;
5005        }
5006
5007        netif_device_detach(dev);
5008
5009        bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5010
5011        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5012
5013        rtnl_unlock();
5014
5015        return 0;
5016}
5017
5018int bnx2x_resume(struct pci_dev *pdev)
5019{
5020        struct net_device *dev = pci_get_drvdata(pdev);
5021        struct bnx2x *bp;
5022        int rc;
5023
5024        if (!dev) {
5025                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5026                return -ENODEV;
5027        }
5028        bp = netdev_priv(dev);
5029
5030        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5031                BNX2X_ERR("Handling parity error recovery. Try again later\n");
5032                return -EAGAIN;
5033        }
5034
5035        rtnl_lock();
5036
5037        pci_restore_state(pdev);
5038
5039        if (!netif_running(dev)) {
5040                rtnl_unlock();
5041                return 0;
5042        }
5043
5044        bnx2x_set_power_state(bp, PCI_D0);
5045        netif_device_attach(dev);
5046
5047        rc = bnx2x_nic_load(bp, LOAD_OPEN);
5048
5049        rtnl_unlock();
5050
5051        return rc;
5052}
5053
5054void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5055                              u32 cid)
5056{
5057        if (!cxt) {
5058                BNX2X_ERR("bad context pointer %p\n", cxt);
5059                return;
5060        }
5061
5062        /* ustorm cxt validation */
5063        cxt->ustorm_ag_context.cdu_usage =
5064                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5065                        CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5066        /* xcontext validation */
5067        cxt->xstorm_ag_context.cdu_reserved =
5068                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5069                        CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5070}
5071
5072static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5073                                    u8 fw_sb_id, u8 sb_index,
5074                                    u8 ticks)
5075{
5076        u32 addr = BAR_CSTRORM_INTMEM +
5077                   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5078        REG_WR8(bp, addr, ticks);
5079        DP(NETIF_MSG_IFUP,
5080           "port %x fw_sb_id %d sb_index %d ticks %d\n",
5081           port, fw_sb_id, sb_index, ticks);
5082}
5083
5084static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5085                                    u16 fw_sb_id, u8 sb_index,
5086                                    u8 disable)
5087{
5088        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5089        u32 addr = BAR_CSTRORM_INTMEM +
5090                   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5091        u8 flags = REG_RD8(bp, addr);
5092        /* clear and set */
5093        flags &= ~HC_INDEX_DATA_HC_ENABLED;
5094        flags |= enable_flag;
5095        REG_WR8(bp, addr, flags);
5096        DP(NETIF_MSG_IFUP,
5097           "port %x fw_sb_id %d sb_index %d disable %d\n",
5098           port, fw_sb_id, sb_index, disable);
5099}
5100
5101void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5102                                    u8 sb_index, u8 disable, u16 usec)
5103{
5104        int port = BP_PORT(bp);
5105        u8 ticks = usec / BNX2X_BTR;
5106
5107        storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5108
5109        disable = disable ? 1 : (usec ? 0 : 1);
5110        storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5111}
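
/* For illustration: the timeout is programmed in units of BNX2X_BTR
 * microseconds, so a request of usec = 8 * BNX2X_BTR writes 8 ticks to the
 * status block entry. A usec value of 0 (with disable not already set) also
 * disables host coalescing on that index, which is what the
 * "disable = disable ? 1 : (usec ? 0 : 1)" expression encodes.
 */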
5112
5113void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5114                            u32 verbose)
5115{
5116        smp_mb__before_atomic();
5117        set_bit(flag, &bp->sp_rtnl_state);
5118        smp_mb__after_atomic();
5119        DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5120           flag);
5121        schedule_delayed_work(&bp->sp_rtnl_task, 0);
5122}
5123