linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
   1/* bnx2x_cmn.c: QLogic Everest network driver.
   2 *
   3 * Copyright (c) 2007-2013 Broadcom Corporation
   4 * Copyright (c) 2014 QLogic Corporation
   5 * All rights reserved
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation.
  10 *
  11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  12 * Written by: Eliezer Tamir
  13 * Based on code from Michael Chan's bnx2 driver
  14 * UDP CSUM errata workaround by Arik Gendelman
  15 * Slowpath and fastpath rework by Vladislav Zolotarov
  16 * Statistics and Link management by Yitchak Gertner
  17 *
  18 */
  19
  20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21
  22#include <linux/etherdevice.h>
  23#include <linux/if_vlan.h>
  24#include <linux/interrupt.h>
  25#include <linux/ip.h>
  26#include <linux/crash_dump.h>
  27#include <net/tcp.h>
  28#include <net/ipv6.h>
  29#include <net/ip6_checksum.h>
  30#include <linux/prefetch.h>
  31#include "bnx2x_cmn.h"
  32#include "bnx2x_init.h"
  33#include "bnx2x_sp.h"
  34
  35static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
  36static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
  37static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
  38static int bnx2x_poll(struct napi_struct *napi, int budget);
  39
  40static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
  41{
  42        int i;
  43
  44        /* Add NAPI objects */
  45        for_each_rx_queue_cnic(bp, i) {
  46                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  47                               bnx2x_poll, NAPI_POLL_WEIGHT);
  48        }
  49}
  50
  51static void bnx2x_add_all_napi(struct bnx2x *bp)
  52{
  53        int i;
  54
  55        /* Add NAPI objects */
  56        for_each_eth_queue(bp, i) {
  57                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  58                               bnx2x_poll, NAPI_POLL_WEIGHT);
  59        }
  60}
  61
  62static int bnx2x_calc_num_queues(struct bnx2x *bp)
  63{
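        /* bnx2x_num_queues is set from the num_queues module parameter; the
         * GNU "x ? : y" form below falls back to the kernel's default RSS
         * queue count when the parameter is left at 0.
         */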
  64        int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
  65
  66        /* Reduce memory usage in kdump environment by using only one queue */
  67        if (is_kdump_kernel())
  68                nq = 1;
  69
  70        nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
  71        return nq;
  72}
  73
  74/**
  75 * bnx2x_move_fp - move content of the fastpath structure.
  76 *
  77 * @bp:         driver handle
  78 * @from:       source FP index
  79 * @to:         destination FP index
  80 *
   81 * Makes sure the contents of the bp->fp[to].napi are kept
  82 * intact. This is done by first copying the napi struct from
  83 * the target to the source, and then mem copying the entire
  84 * source onto the target. Update txdata pointers and related
  85 * content.
  86 */
  87static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
  88{
  89        struct bnx2x_fastpath *from_fp = &bp->fp[from];
  90        struct bnx2x_fastpath *to_fp = &bp->fp[to];
  91        struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
  92        struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
  93        struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
  94        struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
  95        int old_max_eth_txqs, new_max_eth_txqs;
  96        int old_txdata_index = 0, new_txdata_index = 0;
  97        struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
  98
  99        /* Copy the NAPI object as it has been already initialized */
 100        from_fp->napi = to_fp->napi;
 101
 102        /* Move bnx2x_fastpath contents */
 103        memcpy(to_fp, from_fp, sizeof(*to_fp));
 104        to_fp->index = to;
 105
 106        /* Retain the tpa_info of the original `to' version as we don't want
 107         * 2 FPs to contain the same tpa_info pointer.
 108         */
 109        to_fp->tpa_info = old_tpa_info;
 110
 111        /* move sp_objs contents as well, as their indices match fp ones */
 112        memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
 113
 114        /* move fp_stats contents as well, as their indices match fp ones */
 115        memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
 116
 117        /* Update txdata pointers in fp and move txdata content accordingly:
 118         * Each fp consumes 'max_cos' txdata structures, so the index should be
 119         * decremented by max_cos x delta.
 120         */
 121
 122        old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
 123        new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
 124                                (bp)->max_cos;
 125        if (from == FCOE_IDX(bp)) {
 126                old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
 127                new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
 128        }
 129
 130        memcpy(&bp->bnx2x_txq[new_txdata_index],
 131               &bp->bnx2x_txq[old_txdata_index],
 132               sizeof(struct bnx2x_fp_txdata));
 133        to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
 134}
 135
 136/**
 137 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 138 *
 139 * @bp:        driver handle
 140 * @buf:       character buffer to fill with the fw name
 141 * @buf_len:   length of the above buffer
 142 *
 143 */
 144void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
 145{
 146        if (IS_PF(bp)) {
 147                u8 phy_fw_ver[PHY_FW_VER_LEN];
 148
 149                phy_fw_ver[0] = '\0';
 150                bnx2x_get_ext_phy_fw_version(&bp->link_params,
 151                                             phy_fw_ver, PHY_FW_VER_LEN);
 152                strlcpy(buf, bp->fw_ver, buf_len);
 153                snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
 154                         "bc %d.%d.%d%s%s",
 155                         (bp->common.bc_ver & 0xff0000) >> 16,
 156                         (bp->common.bc_ver & 0xff00) >> 8,
 157                         (bp->common.bc_ver & 0xff),
 158                         ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
 159        } else {
 160                bnx2x_vf_fill_fw_str(bp, buf, buf_len);
 161        }
 162}
 163
 164/**
 165 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 166 *
 167 * @bp: driver handle
 168 * @delta:      number of eth queues which were not allocated
 169 */
 170static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
 171{
 172        int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
 173
  174        /* Queue pointers cannot be re-set on a per-fp basis, as moving a pointer
  175         * backward along the array could cause memory to be overwritten
 176         */
 177        for (cos = 1; cos < bp->max_cos; cos++) {
 178                for (i = 0; i < old_eth_num - delta; i++) {
 179                        struct bnx2x_fastpath *fp = &bp->fp[i];
 180                        int new_idx = cos * (old_eth_num - delta) + i;
 181
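                        /* Repack the txdata array for the reduced queue
                         * count: CoS 'cos' of queue 'i' now lives at index
                         * cos * (old_eth_num - delta) + i.
                         */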
 182                        memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
 183                               sizeof(struct bnx2x_fp_txdata));
 184                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
 185                }
 186        }
 187}
 188
 189int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 190
 191/* free skb in the packet ring at pos idx
 192 * return idx of last bd freed
 193 */
 194static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 195                             u16 idx, unsigned int *pkts_compl,
 196                             unsigned int *bytes_compl)
 197{
 198        struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
 199        struct eth_tx_start_bd *tx_start_bd;
 200        struct eth_tx_bd *tx_data_bd;
 201        struct sk_buff *skb = tx_buf->skb;
 202        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 203        int nbd;
 204        u16 split_bd_len = 0;
 205
 206        /* prefetch skb end pointer to speedup dev_kfree_skb() */
 207        prefetch(&skb->end);
 208
 209        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
 210           txdata->txq_index, idx, tx_buf, skb);
 211
 212        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
 213
 214        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 215#ifdef BNX2X_STOP_ON_ERROR
 216        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
 217                BNX2X_ERR("BAD nbd!\n");
 218                bnx2x_panic();
 219        }
 220#endif
 221        new_cons = nbd + tx_buf->first_bd;
 222
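        /* bd_idx still points at the start BD.  Step past it and the parse
         * BD(s) (plus an optional TSO split BD) below; what is then left in
         * nbd is the number of frag BDs unmapped in the loop further down.
         */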
 223        /* Get the next bd */
 224        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 225
 226        /* Skip a parse bd... */
 227        --nbd;
 228        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 229
 230        if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
 231                /* Skip second parse bd... */
 232                --nbd;
 233                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 234        }
 235
 236        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 237        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 238                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 239                split_bd_len = BD_UNMAP_LEN(tx_data_bd);
 240                --nbd;
 241                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 242        }
 243
 244        /* unmap first bd */
 245        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 246                         BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
 247                         DMA_TO_DEVICE);
 248
 249        /* now free frags */
 250        while (nbd > 0) {
 251
 252                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 253                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
 254                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 255                if (--nbd)
 256                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 257        }
 258
 259        /* release skb */
 260        WARN_ON(!skb);
 261        if (likely(skb)) {
 262                (*pkts_compl)++;
 263                (*bytes_compl) += skb->len;
 264                dev_kfree_skb_any(skb);
 265        }
 266
 267        tx_buf->first_bd = 0;
 268        tx_buf->skb = NULL;
 269
 270        return new_cons;
 271}
 272
 273int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 274{
 275        struct netdev_queue *txq;
 276        u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
 277        unsigned int pkts_compl = 0, bytes_compl = 0;
 278
 279#ifdef BNX2X_STOP_ON_ERROR
 280        if (unlikely(bp->panic))
 281                return -1;
 282#endif
 283
 284        txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 285        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
 286        sw_cons = txdata->tx_pkt_cons;
 287
 288        /* Ensure subsequent loads occur after hw_cons */
 289        smp_rmb();
 290
 291        while (sw_cons != hw_cons) {
 292                u16 pkt_cons;
 293
 294                pkt_cons = TX_BD(sw_cons);
 295
 296                DP(NETIF_MSG_TX_DONE,
 297                   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
 298                   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 299
 300                bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
 301                                            &pkts_compl, &bytes_compl);
 302
 303                sw_cons++;
 304        }
 305
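        /* Report the completed packet and byte counts to BQL */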
 306        netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
 307
 308        txdata->tx_pkt_cons = sw_cons;
 309        txdata->tx_bd_cons = bd_cons;
 310
 311        /* Need to make the tx_bd_cons update visible to start_xmit()
 312         * before checking for netif_tx_queue_stopped().  Without the
 313         * memory barrier, there is a small possibility that
 314         * start_xmit() will miss it and cause the queue to be stopped
 315         * forever.
 316         * On the other hand we need an rmb() here to ensure the proper
 317         * ordering of bit testing in the following
 318         * netif_tx_queue_stopped(txq) call.
 319         */
 320        smp_mb();
 321
 322        if (unlikely(netif_tx_queue_stopped(txq))) {
 323                /* Taking tx_lock() is needed to prevent re-enabling the queue
  324                 * while it's empty. This could have happened if rx_action() gets
 325                 * suspended in bnx2x_tx_int() after the condition before
 326                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
 327                 *
 328                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
 329                 * sends some packets consuming the whole queue again->
 330                 * stops the queue
 331                 */
 332
 333                __netif_tx_lock(txq, smp_processor_id());
 334
 335                if ((netif_tx_queue_stopped(txq)) &&
 336                    (bp->state == BNX2X_STATE_OPEN) &&
 337                    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
 338                        netif_tx_wake_queue(txq);
 339
 340                __netif_tx_unlock(txq);
 341        }
 342        return 0;
 343}
 344
 345static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
 346                                             u16 idx)
 347{
 348        u16 last_max = fp->last_max_sge;
 349
 350        if (SUB_S16(idx, last_max) > 0)
 351                fp->last_max_sge = idx;
 352}
 353
 354static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 355                                         u16 sge_len,
 356                                         struct eth_end_agg_rx_cqe *cqe)
 357{
 358        struct bnx2x *bp = fp->bp;
 359        u16 last_max, last_elem, first_elem;
 360        u16 delta = 0;
 361        u16 i;
 362
 363        if (!sge_len)
 364                return;
 365
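        /* sge_mask holds one bit per SGE: bits are cleared below for the
         * SGEs consumed by this aggregation, and the SGE producer is then
         * advanced only over 64-bit mask elements that are fully consumed,
         * re-arming (setting) them as it goes.
         */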
 366        /* First mark all used pages */
 367        for (i = 0; i < sge_len; i++)
 368                BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 369                        RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
 370
 371        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 372           sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 373
 374        /* Here we assume that the last SGE index is the biggest */
 375        prefetch((void *)(fp->sge_mask));
 376        bnx2x_update_last_max_sge(fp,
 377                le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 378
 379        last_max = RX_SGE(fp->last_max_sge);
 380        last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
 381        first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
 382
 383        /* If ring is not full */
 384        if (last_elem + 1 != first_elem)
 385                last_elem++;
 386
 387        /* Now update the prod */
 388        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
 389                if (likely(fp->sge_mask[i]))
 390                        break;
 391
 392                fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
 393                delta += BIT_VEC64_ELEM_SZ;
 394        }
 395
 396        if (delta > 0) {
 397                fp->rx_sge_prod += delta;
 398                /* clear page-end entries */
 399                bnx2x_clear_sge_mask_next_elems(fp);
 400        }
 401
 402        DP(NETIF_MSG_RX_STATUS,
 403           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
 404           fp->last_max_sge, fp->rx_sge_prod);
 405}
 406
 407/* Get Toeplitz hash value in the skb using the value from the
 408 * CQE (calculated by HW).
 409 */
 410static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
 411                            const struct eth_fast_path_rx_cqe *cqe,
 412                            enum pkt_hash_types *rxhash_type)
 413{
 414        /* Get Toeplitz hash from CQE */
 415        if ((bp->dev->features & NETIF_F_RXHASH) &&
 416            (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
 417                enum eth_rss_hash_type htype;
 418
 419                htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
 420                *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
 421                                (htype == TCP_IPV6_HASH_TYPE)) ?
 422                               PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
 423
 424                return le32_to_cpu(cqe->rss_hash_result);
 425        }
 426        *rxhash_type = PKT_HASH_TYPE_NONE;
 427        return 0;
 428}
 429
 430static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 431                            u16 cons, u16 prod,
 432                            struct eth_fast_path_rx_cqe *cqe)
 433{
 434        struct bnx2x *bp = fp->bp;
 435        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 436        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 437        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 438        dma_addr_t mapping;
 439        struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 440        struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 441
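        /* The spare buffer parked in tpa_info->first_buf becomes the new
         * producer buffer, while the buffer that just received the first
         * segment of the aggregation is parked in first_buf until the
         * TPA stop completion arrives.
         */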
 442        /* print error if current state != stop */
 443        if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 444                BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 445
 446        /* Try to map an empty data buffer from the aggregation info  */
 447        mapping = dma_map_single(&bp->pdev->dev,
 448                                 first_buf->data + NET_SKB_PAD,
 449                                 fp->rx_buf_size, DMA_FROM_DEVICE);
 450        /*
 451         *  ...if it fails - move the skb from the consumer to the producer
 452         *  and set the current aggregation state as ERROR to drop it
 453         *  when TPA_STOP arrives.
 454         */
 455
 456        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 457                /* Move the BD from the consumer to the producer */
 458                bnx2x_reuse_rx_data(fp, cons, prod);
 459                tpa_info->tpa_state = BNX2X_TPA_ERROR;
 460                return;
 461        }
 462
 463        /* move empty data from pool to prod */
 464        prod_rx_buf->data = first_buf->data;
 465        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 466        /* point prod_bd to new data */
 467        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 468        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 469
 470        /* move partial skb from cons to pool (don't unmap yet) */
 471        *first_buf = *cons_rx_buf;
 472
 473        /* mark bin state as START */
 474        tpa_info->parsing_flags =
 475                le16_to_cpu(cqe->pars_flags.flags);
 476        tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
 477        tpa_info->tpa_state = BNX2X_TPA_START;
 478        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 479        tpa_info->placement_offset = cqe->placement_offset;
 480        tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
 481        if (fp->mode == TPA_MODE_GRO) {
 482                u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
 483                tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
 484                tpa_info->gro_size = gro_size;
 485        }
 486
 487#ifdef BNX2X_STOP_ON_ERROR
 488        fp->tpa_queue_used |= (1 << queue);
 489        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
 490           fp->tpa_queue_used);
 491#endif
 492}
 493
 494/* Timestamp option length allowed for TPA aggregation:
 495 *
 496 *              nop nop kind length echo val
 497 */
 498#define TPA_TSTAMP_OPT_LEN      12
 499/**
 500 * bnx2x_set_gro_params - compute GRO values
 501 *
 502 * @skb:                packet skb
 503 * @parsing_flags:      parsing flags from the START CQE
 504 * @len_on_bd:          total length of the first packet for the
 505 *                      aggregation.
 506 * @pkt_len:            length of all segments
 507 *
 508 * Approximate value of the MSS for this aggregation calculated using
 509 * the first packet of it.
 510 * Compute number of aggregated segments, and gso_type.
 511 */
 512static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 513                                 u16 len_on_bd, unsigned int pkt_len,
 514                                 u16 num_of_coalesced_segs)
 515{
 516        /* TPA aggregation won't have either IP options or TCP options
 517         * other than timestamp or IPv6 extension headers.
 518         */
 519        u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
 520
 521        if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
 522            PRS_FLAG_OVERETH_IPV6) {
 523                hdrs_len += sizeof(struct ipv6hdr);
 524                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 525        } else {
 526                hdrs_len += sizeof(struct iphdr);
 527                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 528        }
 529
  530        /* Check if there was a TCP timestamp; if there was, it will
  531         * always be 12 bytes long: nop nop kind length echo val.
 532         *
 533         * Otherwise FW would close the aggregation.
 534         */
 535        if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 536                hdrs_len += TPA_TSTAMP_OPT_LEN;
 537
 538        skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
 539
 540        /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
 541         * to skb_shinfo(skb)->gso_segs
 542         */
 543        NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
 544}
 545
 546static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 547                              u16 index, gfp_t gfp_mask)
 548{
 549        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
 550        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
 551        struct bnx2x_alloc_pool *pool = &fp->page_pool;
 552        dma_addr_t mapping;
 553
 554        if (!pool->page) {
 555                pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
 556                if (unlikely(!pool->page))
 557                        return -ENOMEM;
 558
 559                pool->offset = 0;
 560        }
 561
 562        mapping = dma_map_page(&bp->pdev->dev, pool->page,
 563                               pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 564        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 565                BNX2X_ERR("Can't map sge\n");
 566                return -ENOMEM;
 567        }
 568
 569        sw_buf->page = pool->page;
 570        sw_buf->offset = pool->offset;
 571
 572        dma_unmap_addr_set(sw_buf, mapping, mapping);
 573
 574        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 575        sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 576
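        /* Advance the pool past this chunk.  If another SGE-sized chunk
         * still fits in the page, take an extra reference so the pool keeps
         * the page alive; otherwise the ring entries now own the remaining
         * references and the next call will allocate a fresh page.
         */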
 577        pool->offset += SGE_PAGE_SIZE;
 578        if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
 579                get_page(pool->page);
 580        else
 581                pool->page = NULL;
 582        return 0;
 583}
 584
 585static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 586                               struct bnx2x_agg_info *tpa_info,
 587                               u16 pages,
 588                               struct sk_buff *skb,
 589                               struct eth_end_agg_rx_cqe *cqe,
 590                               u16 cqe_idx)
 591{
 592        struct sw_rx_page *rx_pg, old_rx_pg;
 593        u32 i, frag_len, frag_size;
 594        int err, j, frag_id = 0;
 595        u16 len_on_bd = tpa_info->len_on_bd;
 596        u16 full_page = 0, gro_size = 0;
 597
 598        frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 599
 600        if (fp->mode == TPA_MODE_GRO) {
 601                gro_size = tpa_info->gro_size;
 602                full_page = tpa_info->full_page;
 603        }
 604
 605        /* This is needed in order to enable forwarding support */
 606        if (frag_size)
 607                bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
 608                                     le16_to_cpu(cqe->pkt_len),
 609                                     le16_to_cpu(cqe->num_of_coalesced_segs));
 610
 611#ifdef BNX2X_STOP_ON_ERROR
 612        if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
 613                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 614                          pages, cqe_idx);
 615                BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 616                bnx2x_panic();
 617                return -EINVAL;
 618        }
 619#endif
 620
 621        /* Run through the SGL and compose the fragmented skb */
 622        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
 623                u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 624
 625                /* FW gives the indices of the SGE as if the ring is an array
 626                   (meaning that "next" element will consume 2 indices) */
 627                if (fp->mode == TPA_MODE_GRO)
 628                        frag_len = min_t(u32, frag_size, (u32)full_page);
 629                else /* LRO */
 630                        frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
 631
 632                rx_pg = &fp->rx_page_ring[sge_idx];
 633                old_rx_pg = *rx_pg;
 634
 635                /* If we fail to allocate a substitute page, we simply stop
 636                   where we are and drop the whole packet */
 637                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
 638                if (unlikely(err)) {
 639                        bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 640                        return err;
 641                }
 642
 643                dma_unmap_page(&bp->pdev->dev,
 644                               dma_unmap_addr(&old_rx_pg, mapping),
 645                               SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 646                /* Add one frag and update the appropriate fields in the skb */
 647                if (fp->mode == TPA_MODE_LRO)
 648                        skb_fill_page_desc(skb, j, old_rx_pg.page,
 649                                           old_rx_pg.offset, frag_len);
 650                else { /* GRO */
 651                        int rem;
 652                        int offset = 0;
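                        /* In GRO mode the data is split into gro_size-sized
                         * chunks, one skb frag each; every extra frag that
                         * points into the same page takes an additional page
                         * reference.
                         */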
 653                        for (rem = frag_len; rem > 0; rem -= gro_size) {
 654                                int len = rem > gro_size ? gro_size : rem;
 655                                skb_fill_page_desc(skb, frag_id++,
 656                                                   old_rx_pg.page,
 657                                                   old_rx_pg.offset + offset,
 658                                                   len);
 659                                if (offset)
 660                                        get_page(old_rx_pg.page);
 661                                offset += len;
 662                        }
 663                }
 664
 665                skb->data_len += frag_len;
 666                skb->truesize += SGE_PAGES;
 667                skb->len += frag_len;
 668
 669                frag_size -= frag_len;
 670        }
 671
 672        return 0;
 673}
 674
 675static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 676{
 677        if (fp->rx_frag_size)
 678                skb_free_frag(data);
 679        else
 680                kfree(data);
 681}
 682
 683static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
 684{
 685        if (fp->rx_frag_size) {
 686                /* GFP_KERNEL allocations are used only during initialization */
 687                if (unlikely(gfpflags_allow_blocking(gfp_mask)))
 688                        return (void *)__get_free_page(gfp_mask);
 689
 690                return napi_alloc_frag(fp->rx_frag_size);
 691        }
 692
 693        return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
 694}
 695
 696#ifdef CONFIG_INET
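/* tcp_gro_complete() hands the aggregated skb to the stack as a GSO packet,
 * which requires th->check to hold the TCP pseudo-header checksum; the
 * helpers below recompute it from the IP addresses and the TCP length.
 */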
 697static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
 698{
 699        const struct iphdr *iph = ip_hdr(skb);
 700        struct tcphdr *th;
 701
 702        skb_set_transport_header(skb, sizeof(struct iphdr));
 703        th = tcp_hdr(skb);
 704
 705        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
 706                                  iph->saddr, iph->daddr, 0);
 707}
 708
 709static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
 710{
 711        struct ipv6hdr *iph = ipv6_hdr(skb);
 712        struct tcphdr *th;
 713
 714        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 715        th = tcp_hdr(skb);
 716
 717        th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 718                                  &iph->saddr, &iph->daddr, 0);
 719}
 720
 721static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
 722                            void (*gro_func)(struct bnx2x*, struct sk_buff*))
 723{
 724        skb_reset_network_header(skb);
 725        gro_func(bp, skb);
 726        tcp_gro_complete(skb);
 727}
 728#endif
 729
 730static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 731                               struct sk_buff *skb)
 732{
 733#ifdef CONFIG_INET
 734        if (skb_shinfo(skb)->gso_size) {
 735                switch (be16_to_cpu(skb->protocol)) {
 736                case ETH_P_IP:
 737                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
 738                        break;
 739                case ETH_P_IPV6:
 740                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
 741                        break;
 742                default:
 743                        netdev_WARN_ONCE(bp->dev,
 744                                         "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
 745                                         be16_to_cpu(skb->protocol));
 746                }
 747        }
 748#endif
 749        skb_record_rx_queue(skb, fp->rx_queue);
 750        napi_gro_receive(&fp->napi, skb);
 751}
 752
 753static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 754                           struct bnx2x_agg_info *tpa_info,
 755                           u16 pages,
 756                           struct eth_end_agg_rx_cqe *cqe,
 757                           u16 cqe_idx)
 758{
 759        struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 760        u8 pad = tpa_info->placement_offset;
 761        u16 len = tpa_info->len_on_bd;
 762        struct sk_buff *skb = NULL;
 763        u8 *new_data, *data = rx_buf->data;
 764        u8 old_tpa_state = tpa_info->tpa_state;
 765
 766        tpa_info->tpa_state = BNX2X_TPA_STOP;
 767
  768        /* If there was an error during the handling of the TPA_START -
 769         * drop this aggregation.
 770         */
 771        if (old_tpa_state == BNX2X_TPA_ERROR)
 772                goto drop;
 773
 774        /* Try to allocate the new data */
 775        new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
 776        /* Unmap skb in the pool anyway, as we are going to change
 777           pool entry status to BNX2X_TPA_STOP even if new skb allocation
 778           fails. */
 779        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 780                         fp->rx_buf_size, DMA_FROM_DEVICE);
 781        if (likely(new_data))
 782                skb = build_skb(data, fp->rx_frag_size);
 783
 784        if (likely(skb)) {
 785#ifdef BNX2X_STOP_ON_ERROR
 786                if (pad + len > fp->rx_buf_size) {
 787                        BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
 788                                  pad, len, fp->rx_buf_size);
 789                        bnx2x_panic();
 790                        return;
 791                }
 792#endif
 793
 794                skb_reserve(skb, pad + NET_SKB_PAD);
 795                skb_put(skb, len);
 796                skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
 797
 798                skb->protocol = eth_type_trans(skb, bp->dev);
 799                skb->ip_summed = CHECKSUM_UNNECESSARY;
 800
 801                if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
 802                                         skb, cqe, cqe_idx)) {
 803                        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 804                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
 805                        bnx2x_gro_receive(bp, fp, skb);
 806                } else {
 807                        DP(NETIF_MSG_RX_STATUS,
 808                           "Failed to allocate new pages - dropping packet!\n");
 809                        dev_kfree_skb_any(skb);
 810                }
 811
 812                /* put new data in bin */
 813                rx_buf->data = new_data;
 814
 815                return;
 816        }
 817        if (new_data)
 818                bnx2x_frag_free(fp, new_data);
 819drop:
 820        /* drop the packet and keep the buffer in the bin */
 821        DP(NETIF_MSG_RX_STATUS,
 822           "Failed to allocate or map a new skb - dropping packet!\n");
 823        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
 824}
 825
 826static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 827                               u16 index, gfp_t gfp_mask)
 828{
 829        u8 *data;
 830        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
 831        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 832        dma_addr_t mapping;
 833
 834        data = bnx2x_frag_alloc(fp, gfp_mask);
 835        if (unlikely(data == NULL))
 836                return -ENOMEM;
 837
 838        mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
 839                                 fp->rx_buf_size,
 840                                 DMA_FROM_DEVICE);
 841        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 842                bnx2x_frag_free(fp, data);
 843                BNX2X_ERR("Can't map rx data\n");
 844                return -ENOMEM;
 845        }
 846
 847        rx_buf->data = data;
 848        dma_unmap_addr_set(rx_buf, mapping, mapping);
 849
 850        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 851        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 852
 853        return 0;
 854}
 855
 856static
 857void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
 858                                 struct bnx2x_fastpath *fp,
 859                                 struct bnx2x_eth_q_stats *qstats)
 860{
 861        /* Do nothing if no L4 csum validation was done.
 862         * We do not check whether IP csum was validated. For IPv4 we assume
 863         * that if the card got as far as validating the L4 csum, it also
 864         * validated the IP csum. IPv6 has no IP csum.
 865         */
 866        if (cqe->fast_path_cqe.status_flags &
 867            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
 868                return;
 869
 870        /* If L4 validation was done, check if an error was found. */
 871
 872        if (cqe->fast_path_cqe.type_error_flags &
 873            (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
 874             ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
 875                qstats->hw_csum_err++;
 876        else
 877                skb->ip_summed = CHECKSUM_UNNECESSARY;
 878}
 879
 880static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 881{
 882        struct bnx2x *bp = fp->bp;
 883        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 884        u16 sw_comp_cons, sw_comp_prod;
 885        int rx_pkt = 0;
 886        union eth_rx_cqe *cqe;
 887        struct eth_fast_path_rx_cqe *cqe_fp;
 888
 889#ifdef BNX2X_STOP_ON_ERROR
 890        if (unlikely(bp->panic))
 891                return 0;
 892#endif
 893        if (budget <= 0)
 894                return rx_pkt;
 895
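        /* bd_cons/bd_prod walk the RX buffer descriptor ring and
         * sw_comp_cons/sw_comp_prod walk the completion queue; bd_prod_fw is
         * the value written back to the FW as the new BD producer once the
         * loop is done.
         */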
 896        bd_cons = fp->rx_bd_cons;
 897        bd_prod = fp->rx_bd_prod;
 898        bd_prod_fw = bd_prod;
 899        sw_comp_cons = fp->rx_comp_cons;
 900        sw_comp_prod = fp->rx_comp_prod;
 901
 902        comp_ring_cons = RCQ_BD(sw_comp_cons);
 903        cqe = &fp->rx_comp_ring[comp_ring_cons];
 904        cqe_fp = &cqe->fast_path_cqe;
 905
 906        DP(NETIF_MSG_RX_STATUS,
 907           "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
 908
 909        while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
 910                struct sw_rx_bd *rx_buf = NULL;
 911                struct sk_buff *skb;
 912                u8 cqe_fp_flags;
 913                enum eth_rx_cqe_type cqe_fp_type;
 914                u16 len, pad, queue;
 915                u8 *data;
 916                u32 rxhash;
 917                enum pkt_hash_types rxhash_type;
 918
 919#ifdef BNX2X_STOP_ON_ERROR
 920                if (unlikely(bp->panic))
 921                        return 0;
 922#endif
 923
 924                bd_prod = RX_BD(bd_prod);
 925                bd_cons = RX_BD(bd_cons);
 926
 927                /* A rmb() is required to ensure that the CQE is not read
 928                 * before it is written by the adapter DMA.  PCI ordering
 929                 * rules will make sure the other fields are written before
 930                 * the marker at the end of struct eth_fast_path_rx_cqe
 931                 * but without rmb() a weakly ordered processor can process
  932                 * stale data.  Without the barrier the TPA state machine might
  933                 * enter an inconsistent state and the kernel stack might be
  934                 * provided with an incorrect packet description - these lead
  935                 * to various kernel crashes.
 936                 */
 937                rmb();
 938
 939                cqe_fp_flags = cqe_fp->type_error_flags;
 940                cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 941
 942                DP(NETIF_MSG_RX_STATUS,
 943                   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
 944                   CQE_TYPE(cqe_fp_flags),
 945                   cqe_fp_flags, cqe_fp->status_flags,
 946                   le32_to_cpu(cqe_fp->rss_hash_result),
 947                   le16_to_cpu(cqe_fp->vlan_tag),
 948                   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
 949
 950                /* is this a slowpath msg? */
 951                if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 952                        bnx2x_sp_event(fp, cqe);
 953                        goto next_cqe;
 954                }
 955
 956                rx_buf = &fp->rx_buf_ring[bd_cons];
 957                data = rx_buf->data;
 958
 959                if (!CQE_TYPE_FAST(cqe_fp_type)) {
 960                        struct bnx2x_agg_info *tpa_info;
 961                        u16 frag_size, pages;
 962#ifdef BNX2X_STOP_ON_ERROR
 963                        /* sanity check */
 964                        if (fp->mode == TPA_MODE_DISABLED &&
 965                            (CQE_TYPE_START(cqe_fp_type) ||
 966                             CQE_TYPE_STOP(cqe_fp_type)))
 967                                BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
 968                                          CQE_TYPE(cqe_fp_type));
 969#endif
 970
 971                        if (CQE_TYPE_START(cqe_fp_type)) {
 972                                u16 queue = cqe_fp->queue_index;
 973                                DP(NETIF_MSG_RX_STATUS,
 974                                   "calling tpa_start on queue %d\n",
 975                                   queue);
 976
 977                                bnx2x_tpa_start(fp, queue,
 978                                                bd_cons, bd_prod,
 979                                                cqe_fp);
 980
 981                                goto next_rx;
 982                        }
 983                        queue = cqe->end_agg_cqe.queue_index;
 984                        tpa_info = &fp->tpa_info[queue];
 985                        DP(NETIF_MSG_RX_STATUS,
 986                           "calling tpa_stop on queue %d\n",
 987                           queue);
 988
 989                        frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
 990                                    tpa_info->len_on_bd;
 991
 992                        if (fp->mode == TPA_MODE_GRO)
 993                                pages = (frag_size + tpa_info->full_page - 1) /
 994                                         tpa_info->full_page;
 995                        else
 996                                pages = SGE_PAGE_ALIGN(frag_size) >>
 997                                        SGE_PAGE_SHIFT;
 998
 999                        bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1000                                       &cqe->end_agg_cqe, comp_ring_cons);
1001#ifdef BNX2X_STOP_ON_ERROR
1002                        if (bp->panic)
1003                                return 0;
1004#endif
1005
1006                        bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1007                        goto next_cqe;
1008                }
1009                /* non TPA */
1010                len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1011                pad = cqe_fp->placement_offset;
1012                dma_sync_single_for_cpu(&bp->pdev->dev,
1013                                        dma_unmap_addr(rx_buf, mapping),
1014                                        pad + RX_COPY_THRESH,
1015                                        DMA_FROM_DEVICE);
1016                pad += NET_SKB_PAD;
1017                prefetch(data + pad); /* speedup eth_type_trans() */
1018                /* is this an error packet? */
1019                if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1020                        DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1021                           "ERROR  flags %x  rx packet %u\n",
1022                           cqe_fp_flags, sw_comp_cons);
1023                        bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1024                        goto reuse_rx;
1025                }
1026
1027                /* Since we don't have a jumbo ring
1028                 * copy small packets if mtu > 1500
1029                 */
1030                if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1031                    (len <= RX_COPY_THRESH)) {
1032                        skb = napi_alloc_skb(&fp->napi, len);
1033                        if (skb == NULL) {
1034                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1035                                   "ERROR  packet dropped because of alloc failure\n");
1036                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1037                                goto reuse_rx;
1038                        }
1039                        memcpy(skb->data, data + pad, len);
1040                        bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1041                } else {
1042                        if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1043                                                       GFP_ATOMIC) == 0)) {
1044                                dma_unmap_single(&bp->pdev->dev,
1045                                                 dma_unmap_addr(rx_buf, mapping),
1046                                                 fp->rx_buf_size,
1047                                                 DMA_FROM_DEVICE);
1048                                skb = build_skb(data, fp->rx_frag_size);
1049                                if (unlikely(!skb)) {
1050                                        bnx2x_frag_free(fp, data);
1051                                        bnx2x_fp_qstats(bp, fp)->
1052                                                        rx_skb_alloc_failed++;
1053                                        goto next_rx;
1054                                }
1055                                skb_reserve(skb, pad);
1056                        } else {
1057                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1058                                   "ERROR  packet dropped because of alloc failure\n");
1059                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1060reuse_rx:
1061                                bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1062                                goto next_rx;
1063                        }
1064                }
1065
1066                skb_put(skb, len);
1067                skb->protocol = eth_type_trans(skb, bp->dev);
1068
 1069                /* Set Toeplitz hash for a non-LRO skb */
1070                rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1071                skb_set_hash(skb, rxhash, rxhash_type);
1072
1073                skb_checksum_none_assert(skb);
1074
1075                if (bp->dev->features & NETIF_F_RXCSUM)
1076                        bnx2x_csum_validate(skb, cqe, fp,
1077                                            bnx2x_fp_qstats(bp, fp));
1078
1079                skb_record_rx_queue(skb, fp->rx_queue);
1080
1081                /* Check if this packet was timestamped */
1082                if (unlikely(cqe->fast_path_cqe.type_error_flags &
1083                             (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1084                        bnx2x_set_rx_ts(bp, skb);
1085
1086                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1087                    PARSING_FLAGS_VLAN)
1088                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1089                                               le16_to_cpu(cqe_fp->vlan_tag));
1090
1091                napi_gro_receive(&fp->napi, skb);
1092next_rx:
1093                rx_buf->data = NULL;
1094
1095                bd_cons = NEXT_RX_IDX(bd_cons);
1096                bd_prod = NEXT_RX_IDX(bd_prod);
1097                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1098                rx_pkt++;
1099next_cqe:
1100                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1101                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1102
1103                /* mark CQE as free */
1104                BNX2X_SEED_CQE(cqe_fp);
1105
1106                if (rx_pkt == budget)
1107                        break;
1108
1109                comp_ring_cons = RCQ_BD(sw_comp_cons);
1110                cqe = &fp->rx_comp_ring[comp_ring_cons];
1111                cqe_fp = &cqe->fast_path_cqe;
1112        } /* while */
1113
1114        fp->rx_bd_cons = bd_cons;
1115        fp->rx_bd_prod = bd_prod_fw;
1116        fp->rx_comp_cons = sw_comp_cons;
1117        fp->rx_comp_prod = sw_comp_prod;
1118
1119        /* Update producers */
1120        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1121                             fp->rx_sge_prod);
1122
1123        return rx_pkt;
1124}
1125
1126static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1127{
1128        struct bnx2x_fastpath *fp = fp_cookie;
1129        struct bnx2x *bp = fp->bp;
1130        u8 cos;
1131
1132        DP(NETIF_MSG_INTR,
1133           "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1134           fp->index, fp->fw_sb_id, fp->igu_sb_id);
1135
1136        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1137
1138#ifdef BNX2X_STOP_ON_ERROR
1139        if (unlikely(bp->panic))
1140                return IRQ_HANDLED;
1141#endif
1142
1143        /* Handle Rx and Tx according to MSI-X vector */
1144        for_each_cos_in_tx_queue(fp, cos)
1145                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1146
1147        prefetch(&fp->sb_running_index[SM_RX_ID]);
1148        napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1149
1150        return IRQ_HANDLED;
1151}
1152
1153/* HW Lock for shared dual port PHYs */
1154void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1155{
1156        mutex_lock(&bp->port.phy_mutex);
1157
1158        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1159}
1160
1161void bnx2x_release_phy_lock(struct bnx2x *bp)
1162{
1163        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1164
1165        mutex_unlock(&bp->port.phy_mutex);
1166}
1167
1168/* calculates MF speed according to current linespeed and MF configuration */
1169u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1170{
1171        u16 line_speed = bp->link_vars.line_speed;
1172        if (IS_MF(bp)) {
1173                u16 maxCfg = bnx2x_extract_max_cfg(bp,
1174                                                   bp->mf_config[BP_VN(bp)]);
1175
1176                /* Calculate the current MAX line speed limit for the MF
1177                 * devices
1178                 */
1179                if (IS_MF_PERCENT_BW(bp))
1180                        line_speed = (line_speed * maxCfg) / 100;
1181                else { /* SD mode */
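                        /* maxCfg is given here in units of 100 Mbps (hence
                         * the scaling), as opposed to the percentage used
                         * in the MF percent-BW case above.
                         */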
1182                        u16 vn_max_rate = maxCfg * 100;
1183
1184                        if (vn_max_rate < line_speed)
1185                                line_speed = vn_max_rate;
1186                }
1187        }
1188
1189        return line_speed;
1190}
1191
1192/**
1193 * bnx2x_fill_report_data - fill link report data to report
1194 *
1195 * @bp:         driver handle
1196 * @data:       link state to update
1197 *
 1198 * It uses non-atomic bit operations because it is called under the mutex.
1199 */
1200static void bnx2x_fill_report_data(struct bnx2x *bp,
1201                                   struct bnx2x_link_report_data *data)
1202{
1203        memset(data, 0, sizeof(*data));
1204
1205        if (IS_PF(bp)) {
1206                /* Fill the report data: effective line speed */
1207                data->line_speed = bnx2x_get_mf_speed(bp);
1208
1209                /* Link is down */
1210                if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1211                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1212                                  &data->link_report_flags);
1213
1214                if (!BNX2X_NUM_ETH_QUEUES(bp))
1215                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1216                                  &data->link_report_flags);
1217
1218                /* Full DUPLEX */
1219                if (bp->link_vars.duplex == DUPLEX_FULL)
1220                        __set_bit(BNX2X_LINK_REPORT_FD,
1221                                  &data->link_report_flags);
1222
1223                /* Rx Flow Control is ON */
1224                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1225                        __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1226                                  &data->link_report_flags);
1227
1228                /* Tx Flow Control is ON */
1229                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1230                        __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1231                                  &data->link_report_flags);
1232        } else { /* VF */
1233                *data = bp->vf_link_vars;
1234        }
1235}
1236
1237/**
1238 * bnx2x_link_report - report link status to OS.
1239 *
1240 * @bp:         driver handle
1241 *
1242 * Calls the __bnx2x_link_report() under the same locking scheme
1243 * as a link/PHY state managing code to ensure a consistent link
1244 * reporting.
1245 */
1246
1247void bnx2x_link_report(struct bnx2x *bp)
1248{
1249        bnx2x_acquire_phy_lock(bp);
1250        __bnx2x_link_report(bp);
1251        bnx2x_release_phy_lock(bp);
1252}
1253
1254/**
1255 * __bnx2x_link_report - report link status to OS.
1256 *
1257 * @bp:         driver handle
1258 *
 1259 * Non-atomic implementation.
1260 * Should be called under the phy_lock.
1261 */
1262void __bnx2x_link_report(struct bnx2x *bp)
1263{
1264        struct bnx2x_link_report_data cur_data;
1265
1266        if (bp->force_link_down) {
1267                bp->link_vars.link_up = 0;
1268                return;
1269        }
1270
1271        /* reread mf_cfg */
1272        if (IS_PF(bp) && !CHIP_IS_E1(bp))
1273                bnx2x_read_mf_cfg(bp);
1274
1275        /* Read the current link report info */
1276        bnx2x_fill_report_data(bp, &cur_data);
1277
1278        /* Don't report link down or exactly the same link status twice */
1279        if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1280            (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1281                      &bp->last_reported_link.link_report_flags) &&
1282             test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1283                      &cur_data.link_report_flags)))
1284                return;
1285
1286        bp->link_cnt++;
1287
 1288        /* We are going to report new link parameters now -
1289         * remember the current data for the next time.
1290         */
1291        memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1292
1293        /* propagate status to VFs */
1294        if (IS_PF(bp))
1295                bnx2x_iov_link_update(bp);
1296
1297        if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1298                     &cur_data.link_report_flags)) {
1299                netif_carrier_off(bp->dev);
1300                netdev_err(bp->dev, "NIC Link is Down\n");
1301                return;
1302        } else {
1303                const char *duplex;
1304                const char *flow;
1305
1306                netif_carrier_on(bp->dev);
1307
1308                if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1309                                       &cur_data.link_report_flags))
1310                        duplex = "full";
1311                else
1312                        duplex = "half";
1313
 1314                /* Handle the FC flags at the end so that only these flags can
 1315                 * still be set. This way we can easily check whether any FC is
 1316                 * enabled.
1317                 */
1318                if (cur_data.link_report_flags) {
1319                        if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1320                                     &cur_data.link_report_flags)) {
1321                                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1322                                     &cur_data.link_report_flags))
1323                                        flow = "ON - receive & transmit";
1324                                else
1325                                        flow = "ON - receive";
1326                        } else {
1327                                flow = "ON - transmit";
1328                        }
1329                } else {
1330                        flow = "none";
1331                }
1332                netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1333                            cur_data.line_speed, duplex, flow);
1334        }
1335}
1336
1337static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1338{
1339        int i;
1340
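        /* The reserved slots at the end of each SGE ring page form a "next
         * page" element; point each one at the DMA address of the following
         * page, wrapping back to the first page after the last one.
         */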
1341        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1342                struct eth_rx_sge *sge;
1343
1344                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1345                sge->addr_hi =
1346                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1347                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1348
1349                sge->addr_lo =
1350                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1351                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1352        }
1353}
1354
1355static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1356                                struct bnx2x_fastpath *fp, int last)
1357{
1358        int i;
1359
1360        for (i = 0; i < last; i++) {
1361                struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1362                struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1363                u8 *data = first_buf->data;
1364
1365                if (data == NULL) {
1366                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1367                        continue;
1368                }
1369                if (tpa_info->tpa_state == BNX2X_TPA_START)
1370                        dma_unmap_single(&bp->pdev->dev,
1371                                         dma_unmap_addr(first_buf, mapping),
1372                                         fp->rx_buf_size, DMA_FROM_DEVICE);
1373                bnx2x_frag_free(fp, data);
1374                first_buf->data = NULL;
1375        }
1376}
1377
1378void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1379{
1380        int j;
1381
1382        for_each_rx_queue_cnic(bp, j) {
1383                struct bnx2x_fastpath *fp = &bp->fp[j];
1384
1385                fp->rx_bd_cons = 0;
1386
1387                /* Activate BD ring */
1388                /* Warning!
1389                 * This will generate an interrupt (to the TSTORM);
1390                 * it must only be done after the chip is initialized.
1391                 */
1392                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1393                                     fp->rx_sge_prod);
1394        }
1395}
1396
1397void bnx2x_init_rx_rings(struct bnx2x *bp)
1398{
1399        int func = BP_FUNC(bp);
1400        u16 ring_prod;
1401        int i, j;
1402
1403        /* Allocate TPA resources */
1404        for_each_eth_queue(bp, j) {
1405                struct bnx2x_fastpath *fp = &bp->fp[j];
1406
1407                DP(NETIF_MSG_IFUP,
1408                   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1409
1410                if (fp->mode != TPA_MODE_DISABLED) {
1411                        /* Fill the per-aggregation pool */
1412                        for (i = 0; i < MAX_AGG_QS(bp); i++) {
1413                                struct bnx2x_agg_info *tpa_info =
1414                                        &fp->tpa_info[i];
1415                                struct sw_rx_bd *first_buf =
1416                                        &tpa_info->first_buf;
1417
1418                                first_buf->data =
1419                                        bnx2x_frag_alloc(fp, GFP_KERNEL);
1420                                if (!first_buf->data) {
1421                                        BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1422                                                  j);
1423                                        bnx2x_free_tpa_pool(bp, fp, i);
1424                                        fp->mode = TPA_MODE_DISABLED;
1425                                        break;
1426                                }
1427                                dma_unmap_addr_set(first_buf, mapping, 0);
1428                                tpa_info->tpa_state = BNX2X_TPA_STOP;
1429                        }
1430
1431                        /* "next page" elements initialization */
1432                        bnx2x_set_next_page_sgl(fp);
1433
1434                        /* set SGEs bit mask */
1435                        bnx2x_init_sge_ring_bit_mask(fp);
1436
1437                        /* Allocate SGEs and initialize the ring elements */
1438                        for (i = 0, ring_prod = 0;
1439                             i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1440
1441                                if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1442                                                       GFP_KERNEL) < 0) {
1443                                        BNX2X_ERR("was only able to allocate %d rx sges\n",
1444                                                  i);
1445                                        BNX2X_ERR("disabling TPA for queue[%d]\n",
1446                                                  j);
1447                                        /* Cleanup already allocated elements */
1448                                        bnx2x_free_rx_sge_range(bp, fp,
1449                                                                ring_prod);
1450                                        bnx2x_free_tpa_pool(bp, fp,
1451                                                            MAX_AGG_QS(bp));
1452                                        fp->mode = TPA_MODE_DISABLED;
1453                                        ring_prod = 0;
1454                                        break;
1455                                }
1456                                ring_prod = NEXT_SGE_IDX(ring_prod);
1457                        }
1458
1459                        fp->rx_sge_prod = ring_prod;
1460                }
1461        }
1462
1463        for_each_eth_queue(bp, j) {
1464                struct bnx2x_fastpath *fp = &bp->fp[j];
1465
1466                fp->rx_bd_cons = 0;
1467
1468                /* Activate BD ring */
1469                /* Warning!
1470                 * This will generate an interrupt (to the TSTORM);
1471                 * it must only be done after the chip is initialized.
1472                 */
1473                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1474                                     fp->rx_sge_prod);
1475
1476                if (j != 0)
1477                        continue;
1478
1479                if (CHIP_IS_E1(bp)) {
1480                        REG_WR(bp, BAR_USTRORM_INTMEM +
1481                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1482                               U64_LO(fp->rx_comp_mapping));
1483                        REG_WR(bp, BAR_USTRORM_INTMEM +
1484                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1485                               U64_HI(fp->rx_comp_mapping));
1486                }
1487        }
1488}
1489
1490static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1491{
1492        u8 cos;
1493        struct bnx2x *bp = fp->bp;
1494
1495        for_each_cos_in_tx_queue(fp, cos) {
1496                struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1497                unsigned pkts_compl = 0, bytes_compl = 0;
1498
1499                u16 sw_prod = txdata->tx_pkt_prod;
1500                u16 sw_cons = txdata->tx_pkt_cons;
1501
1502                while (sw_cons != sw_prod) {
1503                        bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1504                                          &pkts_compl, &bytes_compl);
1505                        sw_cons++;
1506                }
1507
1508                netdev_tx_reset_queue(
1509                        netdev_get_tx_queue(bp->dev,
1510                                            txdata->txq_index));
1511        }
1512}
1513
1514static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1515{
1516        int i;
1517
1518        for_each_tx_queue_cnic(bp, i) {
1519                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1520        }
1521}
1522
1523static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1524{
1525        int i;
1526
1527        for_each_eth_queue(bp, i) {
1528                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1529        }
1530}
1531
1532static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1533{
1534        struct bnx2x *bp = fp->bp;
1535        int i;
1536
1537        /* ring wasn't allocated */
1538        if (fp->rx_buf_ring == NULL)
1539                return;
1540
1541        for (i = 0; i < NUM_RX_BD; i++) {
1542                struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1543                u8 *data = rx_buf->data;
1544
1545                if (data == NULL)
1546                        continue;
1547                dma_unmap_single(&bp->pdev->dev,
1548                                 dma_unmap_addr(rx_buf, mapping),
1549                                 fp->rx_buf_size, DMA_FROM_DEVICE);
1550
1551                rx_buf->data = NULL;
1552                bnx2x_frag_free(fp, data);
1553        }
1554}
1555
1556static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1557{
1558        int j;
1559
1560        for_each_rx_queue_cnic(bp, j) {
1561                bnx2x_free_rx_bds(&bp->fp[j]);
1562        }
1563}
1564
1565static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1566{
1567        int j;
1568
1569        for_each_eth_queue(bp, j) {
1570                struct bnx2x_fastpath *fp = &bp->fp[j];
1571
1572                bnx2x_free_rx_bds(fp);
1573
1574                if (fp->mode != TPA_MODE_DISABLED)
1575                        bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1576        }
1577}
1578
1579static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1580{
1581        bnx2x_free_tx_skbs_cnic(bp);
1582        bnx2x_free_rx_skbs_cnic(bp);
1583}
1584
1585void bnx2x_free_skbs(struct bnx2x *bp)
1586{
1587        bnx2x_free_tx_skbs(bp);
1588        bnx2x_free_rx_skbs(bp);
1589}
1590
1591void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1592{
1593        /* load old values */
1594        u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1595
1596        if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1597                /* leave all but MAX value */
1598                mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1599
1600                /* set new MAX value */
1601                mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1602                                & FUNC_MF_CFG_MAX_BW_MASK;
1603
1604                bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1605        }
1606}
1607
1608/**
1609 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1610 *
1611 * @bp:         driver handle
1612 * @nvecs:      number of vectors to be released
1613 */
1614static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1615{
1616        int i, offset = 0;
1617
1618        if (nvecs == offset)
1619                return;
1620
1621        /* VFs don't have a default SB */
1622        if (IS_PF(bp)) {
1623                free_irq(bp->msix_table[offset].vector, bp->dev);
1624                DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1625                   bp->msix_table[offset].vector);
1626                offset++;
1627        }
1628
1629        if (CNIC_SUPPORT(bp)) {
1630                if (nvecs == offset)
1631                        return;
1632                offset++;
1633        }
1634
1635        for_each_eth_queue(bp, i) {
1636                if (nvecs == offset)
1637                        return;
1638                DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1639                   i, bp->msix_table[offset].vector);
1640
1641                free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1642        }
1643}
1644
1645void bnx2x_free_irq(struct bnx2x *bp)
1646{
1647        if (bp->flags & USING_MSIX_FLAG &&
1648            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1649                int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1650
1651                /* vfs don't have a default status block */
1652                if (IS_PF(bp))
1653                        nvecs++;
1654
1655                bnx2x_free_msix_irqs(bp, nvecs);
1656        } else {
1657                free_irq(bp->dev->irq, bp->dev);
1658        }
1659}
1660
1661int bnx2x_enable_msix(struct bnx2x *bp)
1662{
1663        int msix_vec = 0, i, rc;
1664
1665        /* VFs don't have a default status block */
1666        if (IS_PF(bp)) {
1667                bp->msix_table[msix_vec].entry = msix_vec;
1668                BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1669                               bp->msix_table[0].entry);
1670                msix_vec++;
1671        }
1672
1673        /* CNIC requires an MSI-X vector for itself */
1674        if (CNIC_SUPPORT(bp)) {
1675                bp->msix_table[msix_vec].entry = msix_vec;
1676                BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1677                               msix_vec, bp->msix_table[msix_vec].entry);
1678                msix_vec++;
1679        }
1680
1681        /* We need separate vectors for ETH queues only (not FCoE) */
1682        for_each_eth_queue(bp, i) {
1683                bp->msix_table[msix_vec].entry = msix_vec;
1684                BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1685                               msix_vec, msix_vec, i);
1686                msix_vec++;
1687        }
1688
1689        DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1690           msix_vec);
1691
1692        rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1693                                   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1694        /*
1695         * reconfigure number of tx/rx queues according to available
1696         * MSI-X vectors
1697         */
1698        if (rc == -ENOSPC) {
1699                /* Get by with single vector */
1700                rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1701                if (rc < 0) {
1702                        BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1703                                       rc);
1704                        goto no_msix;
1705                }
1706
1707                BNX2X_DEV_INFO("Using single MSI-X vector\n");
1708                bp->flags |= USING_SINGLE_MSIX_FLAG;
1709
1710                BNX2X_DEV_INFO("set number of queues to 1\n");
1711                bp->num_ethernet_queues = 1;
1712                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1713        } else if (rc < 0) {
1714                BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1715                goto no_msix;
1716        } else if (rc < msix_vec) {
1717                /* how many fewer vectors will we have? */
1718                int diff = msix_vec - rc;
1719
1720                BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1721
1722                /*
1723                 * decrease number of queues by number of unallocated entries
1724                 */
1725                bp->num_ethernet_queues -= diff;
1726                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1727
1728                BNX2X_DEV_INFO("New queue configuration set: %d\n",
1729                               bp->num_queues);
1730        }
1731
1732        bp->flags |= USING_MSIX_FLAG;
1733
1734        return 0;
1735
1736no_msix:
1737        /* fall back to INTx if there is not enough memory */
1738        if (rc == -ENOMEM)
1739                bp->flags |= DISABLE_MSI_FLAG;
1740
1741        return rc;
1742}
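
/*
 * A sketch of the resulting msix_table layout on a PF with CNIC support,
 * derived from the loop above (entries shift down by one on a VF, which
 * has no slowpath vector, and by another if CNIC is not supported):
 *
 *   msix_table[0]            - slowpath / default status block
 *   msix_table[1]            - CNIC
 *   msix_table[2..2 + N-1]   - one vector per ETH queue (N = num ETH queues)
 *
 * If the full range cannot be granted, the code above retries with a
 * single vector; if even that fails it returns an error so the caller
 * can fall back to MSI or legacy INTx.
 */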
1743
1744static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1745{
1746        int i, rc, offset = 0;
1747
1748        /* no default status block for vf */
1749        if (IS_PF(bp)) {
1750                rc = request_irq(bp->msix_table[offset++].vector,
1751                                 bnx2x_msix_sp_int, 0,
1752                                 bp->dev->name, bp->dev);
1753                if (rc) {
1754                        BNX2X_ERR("request sp irq failed\n");
1755                        return -EBUSY;
1756                }
1757        }
1758
1759        if (CNIC_SUPPORT(bp))
1760                offset++;
1761
1762        for_each_eth_queue(bp, i) {
1763                struct bnx2x_fastpath *fp = &bp->fp[i];
1764                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1765                         bp->dev->name, i);
1766
1767                rc = request_irq(bp->msix_table[offset].vector,
1768                                 bnx2x_msix_fp_int, 0, fp->name, fp);
1769                if (rc) {
1770                        BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1771                              bp->msix_table[offset].vector, rc);
1772                        bnx2x_free_msix_irqs(bp, offset);
1773                        return -EBUSY;
1774                }
1775
1776                offset++;
1777        }
1778
1779        i = BNX2X_NUM_ETH_QUEUES(bp);
1780        if (IS_PF(bp)) {
1781                offset = 1 + CNIC_SUPPORT(bp);
1782                netdev_info(bp->dev,
1783                            "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1784                            bp->msix_table[0].vector,
1785                            0, bp->msix_table[offset].vector,
1786                            i - 1, bp->msix_table[offset + i - 1].vector);
1787        } else {
1788                offset = CNIC_SUPPORT(bp);
1789                netdev_info(bp->dev,
1790                            "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1791                            0, bp->msix_table[offset].vector,
1792                            i - 1, bp->msix_table[offset + i - 1].vector);
1793        }
1794        return 0;
1795}
1796
1797int bnx2x_enable_msi(struct bnx2x *bp)
1798{
1799        int rc;
1800
1801        rc = pci_enable_msi(bp->pdev);
1802        if (rc) {
1803                BNX2X_DEV_INFO("MSI is not attainable\n");
1804                return -1;
1805        }
1806        bp->flags |= USING_MSI_FLAG;
1807
1808        return 0;
1809}
1810
1811static int bnx2x_req_irq(struct bnx2x *bp)
1812{
1813        unsigned long flags;
1814        unsigned int irq;
1815
1816        if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1817                flags = 0;
1818        else
1819                flags = IRQF_SHARED;
1820
1821        if (bp->flags & USING_MSIX_FLAG)
1822                irq = bp->msix_table[0].vector;
1823        else
1824                irq = bp->pdev->irq;
1825
1826        return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1827}
1828
1829static int bnx2x_setup_irqs(struct bnx2x *bp)
1830{
1831        int rc = 0;
1832        if (bp->flags & USING_MSIX_FLAG &&
1833            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1834                rc = bnx2x_req_msix_irqs(bp);
1835                if (rc)
1836                        return rc;
1837        } else {
1838                rc = bnx2x_req_irq(bp);
1839                if (rc) {
1840                        BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1841                        return rc;
1842                }
1843                if (bp->flags & USING_MSI_FLAG) {
1844                        bp->dev->irq = bp->pdev->irq;
1845                        netdev_info(bp->dev, "using MSI IRQ %d\n",
1846                                    bp->dev->irq);
1847                }
1848                if (bp->flags & USING_MSIX_FLAG) {
1849                        bp->dev->irq = bp->msix_table[0].vector;
1850                        netdev_info(bp->dev, "using MSIX IRQ %d\n",
1851                                    bp->dev->irq);
1852                }
1853        }
1854
1855        return 0;
1856}
1857
1858static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1859{
1860        int i;
1861
1862        for_each_rx_queue_cnic(bp, i) {
1863                napi_enable(&bnx2x_fp(bp, i, napi));
1864        }
1865}
1866
1867static void bnx2x_napi_enable(struct bnx2x *bp)
1868{
1869        int i;
1870
1871        for_each_eth_queue(bp, i) {
1872                napi_enable(&bnx2x_fp(bp, i, napi));
1873        }
1874}
1875
1876static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1877{
1878        int i;
1879
1880        for_each_rx_queue_cnic(bp, i) {
1881                napi_disable(&bnx2x_fp(bp, i, napi));
1882        }
1883}
1884
1885static void bnx2x_napi_disable(struct bnx2x *bp)
1886{
1887        int i;
1888
1889        for_each_eth_queue(bp, i) {
1890                napi_disable(&bnx2x_fp(bp, i, napi));
1891        }
1892}
1893
1894void bnx2x_netif_start(struct bnx2x *bp)
1895{
1896        if (netif_running(bp->dev)) {
1897                bnx2x_napi_enable(bp);
1898                if (CNIC_LOADED(bp))
1899                        bnx2x_napi_enable_cnic(bp);
1900                bnx2x_int_enable(bp);
1901                if (bp->state == BNX2X_STATE_OPEN)
1902                        netif_tx_wake_all_queues(bp->dev);
1903        }
1904}
1905
1906void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1907{
1908        bnx2x_int_disable_sync(bp, disable_hw);
1909        bnx2x_napi_disable(bp);
1910        if (CNIC_LOADED(bp))
1911                bnx2x_napi_disable_cnic(bp);
1912}
1913
1914u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1915                       struct net_device *sb_dev)
1916{
1917        struct bnx2x *bp = netdev_priv(dev);
1918
1919        if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1920                struct ethhdr *hdr = (struct ethhdr *)skb->data;
1921                u16 ether_type = ntohs(hdr->h_proto);
1922
1923                /* Skip VLAN tag if present */
1924                if (ether_type == ETH_P_8021Q) {
1925                        struct vlan_ethhdr *vhdr =
1926                                (struct vlan_ethhdr *)skb->data;
1927
1928                        ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1929                }
1930
1931                /* If ethertype is FCoE or FIP - use FCoE ring */
1932                if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1933                        return bnx2x_fcoe_tx(bp, txq_index);
1934        }
1935
1936        /* select a non-FCoE queue */
1937        return netdev_pick_tx(dev, skb, NULL) %
1938                        (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1939}
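
/*
 * Illustrative queue-selection example (hypothetical numbers): with
 * 4 ETH queues and bp->max_cos == 3, non-FCoE traffic is hashed into
 * txq indices 0..11, while FCoE/FIP frames are steered to the dedicated
 * FCoE ring, whose txq index comes right after all ETH L2 indices (see
 * the kernel-doc of bnx2x_set_real_num_queues() below).
 */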
1940
1941void bnx2x_set_num_queues(struct bnx2x *bp)
1942{
1943        /* RSS queues */
1944        bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1945
1946        /* override in STORAGE SD modes */
1947        if (IS_MF_STORAGE_ONLY(bp))
1948                bp->num_ethernet_queues = 1;
1949
1950        /* Add special queues */
1951        bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1952        bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1953
1954        BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1955}
1956
1957/**
1958 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1959 *
1960 * @bp:         Driver handle
1961 *
1962 * We currently support at most 16 Tx queues per CoS, thus we will
1963 * allocate a multiple of 16 for ETH L2 rings, according to the value of
1964 * bp->max_cos.
1965 *
1966 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1967 * index after all ETH L2 indices.
1968 *
1969 * If the actual number of Tx queues (for each CoS) is less than 16, then there
1970 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1971 * 16..31, ...), i.e. indices that are not coupled with any real Tx queue.
1972 *
1973 * The proper configuration of skb->queue_mapping is handled by
1974 * bnx2x_select_queue() and __skb_tx_hash().
1975 *
1976 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1977 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1978 */
1979static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1980{
1981        int rc, tx, rx;
1982
1983        tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1984        rx = BNX2X_NUM_ETH_QUEUES(bp);
1985
1986/* account for fcoe queue */
1987        if (include_cnic && !NO_FCOE(bp)) {
1988                rx++;
1989                tx++;
1990        }
1991
1992        rc = netif_set_real_num_tx_queues(bp->dev, tx);
1993        if (rc) {
1994                BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1995                return rc;
1996        }
1997        rc = netif_set_real_num_rx_queues(bp->dev, rx);
1998        if (rc) {
1999                BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2000                return rc;
2001        }
2002
2003        DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2004                          tx, rx);
2005
2006        return rc;
2007}
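
/*
 * A small worked example of the accounting above (illustrative numbers):
 * with 8 ETH queues and bp->max_cos == 3, tx = 24 and rx = 8.  If CNIC is
 * included and FCoE is not disabled, both grow by one (tx = 25, rx = 9),
 * the extra slot being the FCoE L2 ring.
 */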
2008
2009static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2010{
2011        int i;
2012
2013        for_each_queue(bp, i) {
2014                struct bnx2x_fastpath *fp = &bp->fp[i];
2015                u32 mtu;
2016
2017                /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2018                if (IS_FCOE_IDX(i))
2019                        /*
2020                         * Although no IP frames are expected to arrive on
2021                         * this ring, we still want to add an
2022                         * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2023                         * overrun attack.
2024                         */
2025                        mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2026                else
2027                        mtu = bp->dev->mtu;
2028                fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2029                                  IP_HEADER_ALIGNMENT_PADDING +
2030                                  ETH_OVERHEAD +
2031                                  mtu +
2032                                  BNX2X_FW_RX_ALIGN_END;
2033                fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2034                /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2035                if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2036                        fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2037                else
2038                        fp->rx_frag_size = 0;
2039        }
2040}
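
/*
 * Rough sizing example for the arithmetic above (illustrative only; the
 * alignment constants are firmware specific).  For a standard 1500-byte
 * MTU:
 *
 *   rx_buf_size = BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING +
 *                 ETH_OVERHEAD + 1500 + BNX2X_FW_RX_ALIGN_END
 *
 * rounded up by SKB_DATA_ALIGN().  The result plus NET_SKB_PAD normally
 * still fits in a single page, so rx_frag_size is set and the page-frag
 * path can be used.  For jumbo MTUs the sum exceeds PAGE_SIZE and
 * rx_frag_size stays 0, so the page-frag path is bypassed.
 */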
2041
2042static int bnx2x_init_rss(struct bnx2x *bp)
2043{
2044        int i;
2045        u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2046
2047        /* Prepare the initial contents for the indirection table if RSS is
2048         * enabled
2049         */
2050        for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2051                bp->rss_conf_obj.ind_table[i] =
2052                        bp->fp->cl_id +
2053                        ethtool_rxfh_indir_default(i, num_eth_queues);
2054
2055        /*
2056         * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2057         * per-port, so if explicit configuration is needed, do it only
2058         * for a PMF.
2059         *
2060         * For 57712 and newer on the other hand it's a per-function
2061         * configuration.
2062         */
2063        return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2064}
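
/*
 * Example of the resulting indirection table (illustrative values): with
 * 4 ETH queues and bp->fp->cl_id == 16, ethtool_rxfh_indir_default()
 * spreads the entries round-robin, so the table becomes
 * 16, 17, 18, 19, 16, 17, ... for every slot of rss_conf_obj.ind_table.
 */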
2065
2066int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2067              bool config_hash, bool enable)
2068{
2069        struct bnx2x_config_rss_params params = {NULL};
2070
2071        /* Although RSS is meaningless when there is a single HW queue we
2072         * still need it enabled in order to have HW Rx hash generated.
2073         *
2074         * if (!is_eth_multi(bp))
2075         *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2076         */
2077
2078        params.rss_obj = rss_obj;
2079
2080        __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2081
2082        if (enable) {
2083                __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2084
2085                /* RSS configuration */
2086                __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2087                __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2088                __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2089                __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2090                if (rss_obj->udp_rss_v4)
2091                        __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2092                if (rss_obj->udp_rss_v6)
2093                        __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2094
2095                if (!CHIP_IS_E1x(bp)) {
2096                        /* valid only for TUNN_MODE_VXLAN tunnel mode */
2097                        __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2098                        __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2099
2100                        /* valid only for TUNN_MODE_GRE tunnel mode */
2101                        __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2102                }
2103        } else {
2104                __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2105        }
2106
2107        /* Hash bits */
2108        params.rss_result_mask = MULTI_MASK;
2109
2110        memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2111
2112        if (config_hash) {
2113                /* RSS keys */
2114                netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2115                __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2116        }
2117
2118        if (IS_PF(bp))
2119                return bnx2x_config_rss(bp, &params);
2120        else
2121                return bnx2x_vfpf_config_rss(bp, &params);
2122}
2123
2124static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2125{
2126        struct bnx2x_func_state_params func_params = {NULL};
2127
2128        /* Prepare parameters for function state transitions */
2129        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2130
2131        func_params.f_obj = &bp->func_obj;
2132        func_params.cmd = BNX2X_F_CMD_HW_INIT;
2133
2134        func_params.params.hw_init.load_phase = load_code;
2135
2136        return bnx2x_func_state_change(bp, &func_params);
2137}
2138
2139/*
2140 * Cleans the objects that have internal lists without sending
2141 * ramrods. Should be run when interrupts are disabled.
2142 */
2143void bnx2x_squeeze_objects(struct bnx2x *bp)
2144{
2145        int rc;
2146        unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2147        struct bnx2x_mcast_ramrod_params rparam = {NULL};
2148        struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2149
2150        /***************** Cleanup MACs' object first *************************/
2151
2152        /* Wait for completion of the requested commands */
2153        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2154        /* Perform a dry cleanup */
2155        __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2156
2157        /* Clean ETH primary MAC */
2158        __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2159        rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2160                                 &ramrod_flags);
2161        if (rc != 0)
2162                BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2163
2164        /* Cleanup UC list */
2165        vlan_mac_flags = 0;
2166        __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2167        rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2168                                 &ramrod_flags);
2169        if (rc != 0)
2170                BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2171
2172        /***************** Now clean mcast object *****************************/
2173        rparam.mcast_obj = &bp->mcast_obj;
2174        __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2175
2176        /* Add a DEL command... - Since we're doing a driver cleanup only,
2177         * we take a lock surrounding both the initial send and the CONTs,
2178         * as we don't want a true completion to disrupt us in the middle.
2179         */
2180        netif_addr_lock_bh(bp->dev);
2181        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2182        if (rc < 0)
2183                BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2184                          rc);
2185
2186        /* ...and wait until all pending commands are cleared */
2187        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2188        while (rc != 0) {
2189                if (rc < 0) {
2190                        BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2191                                  rc);
2192                        netif_addr_unlock_bh(bp->dev);
2193                        return;
2194                }
2195
2196                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2197        }
2198        netif_addr_unlock_bh(bp->dev);
2199}
2200
2201#ifndef BNX2X_STOP_ON_ERROR
2202#define LOAD_ERROR_EXIT(bp, label) \
2203        do { \
2204                (bp)->state = BNX2X_STATE_ERROR; \
2205                goto label; \
2206        } while (0)
2207
2208#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2209        do { \
2210                bp->cnic_loaded = false; \
2211                goto label; \
2212        } while (0)
2213#else /*BNX2X_STOP_ON_ERROR*/
2214#define LOAD_ERROR_EXIT(bp, label) \
2215        do { \
2216                (bp)->state = BNX2X_STATE_ERROR; \
2217                (bp)->panic = 1; \
2218                return -EBUSY; \
2219        } while (0)
2220#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2221        do { \
2222                bp->cnic_loaded = false; \
2223                (bp)->panic = 1; \
2224                return -EBUSY; \
2225        } while (0)
2226#endif /*BNX2X_STOP_ON_ERROR*/
2227
2228static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2229{
2230        BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2231                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2232        return;
2233}
2234
2235static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2236{
2237        int num_groups, vf_headroom = 0;
2238        int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2239
2240        /* number of queues for statistics is number of eth queues + FCoE */
2241        u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2242
2243        /* Total number of FW statistics requests =
2244         * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2245         * and fcoe l2 queue) stats + num of queues (which includes another 1
2246         * for fcoe l2 queue if applicable)
2247         */
2248        bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2249
2250        /* vf stats appear in the request list, but their data is allocated by
2251         * the VFs themselves. We don't include them in the bp->fw_stats_num as
2252         * it is used to determine where to place the vf stats queries in the
2253         * request struct
2254         */
2255        if (IS_SRIOV(bp))
2256                vf_headroom = bnx2x_vf_headroom(bp);
2257
2258        /* Request is built from stats_query_header and an array of
2259         * stats_query_cmd_group each of which contains
2260         * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2261         * configured in the stats_query_header.
2262         */
2263        num_groups =
2264                (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2265                 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2266                 1 : 0));
2267
2268        DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2269           bp->fw_stats_num, vf_headroom, num_groups);
2270        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2271                num_groups * sizeof(struct stats_query_cmd_group);
2272
2273        /* Data for statistics requests + stats_counter
2274         * stats_counter holds per-STORM counters that are incremented
2275         * when STORM has finished with the current request.
2276         * Memory for FCoE offloaded statistics is counted anyway,
2277         * even if it will not be sent.
2278         * VF stats are not accounted for here as the data of VF stats is stored
2279         * in memory allocated by the VF, not here.
2280         */
2281        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2282                sizeof(struct per_pf_stats) +
2283                sizeof(struct fcoe_statistics_params) +
2284                sizeof(struct per_queue_stats) * num_queue_stats +
2285                sizeof(struct stats_counter);
2286
2287        bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2288                                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2289        if (!bp->fw_stats)
2290                goto alloc_mem_err;
2291
2292        /* Set shortcuts */
2293        bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2294        bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2295        bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2296                ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2297        bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2298                bp->fw_stats_req_sz;
2299
2300        DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2301           U64_HI(bp->fw_stats_req_mapping),
2302           U64_LO(bp->fw_stats_req_mapping));
2303        DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2304           U64_HI(bp->fw_stats_data_mapping),
2305           U64_LO(bp->fw_stats_data_mapping));
2306        return 0;
2307
2308alloc_mem_err:
2309        bnx2x_free_fw_stats_mem(bp);
2310        BNX2X_ERR("Can't allocate FW stats memory\n");
2311        return -ENOMEM;
2312}
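
/*
 * Worked example of the request sizing above (illustrative numbers): with
 * 8 ETH queues, FCoE enabled and no VFs, fw_stats_num = 2 + 1 + 9 = 12
 * queries.  With a STATS_QUERY_CMD_COUNT of, say, 16 these all fit into a
 * single stats_query_cmd_group, so
 * fw_stats_req_sz = sizeof(struct stats_query_header) +
 *                   1 * sizeof(struct stats_query_cmd_group).
 */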
2313
2314/* send load request to mcp and analyze response */
2315static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2316{
2317        u32 param;
2318
2319        /* init fw_seq */
2320        bp->fw_seq =
2321                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2322                 DRV_MSG_SEQ_NUMBER_MASK);
2323        BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2324
2325        /* Get current FW pulse sequence */
2326        bp->fw_drv_pulse_wr_seq =
2327                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2328                 DRV_PULSE_SEQ_MASK);
2329        BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2330
2331        param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2332
2333        if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2334                param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2335
2336        /* load request */
2337        (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2338
2339        /* if mcp fails to respond we must abort */
2340        if (!(*load_code)) {
2341                BNX2X_ERR("MCP response failure, aborting\n");
2342                return -EBUSY;
2343        }
2344
2345        /* If mcp refused (e.g. other port is in diagnostic mode) we
2346         * must abort
2347         */
2348        if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2349                BNX2X_ERR("MCP refused load request, aborting\n");
2350                return -EBUSY;
2351        }
2352        return 0;
2353}
2354
2355/* Check whether another PF has already loaded FW to the chip. In
2356 * virtualized environments a PF from another VM may have already
2357 * initialized the device, including loading the FW.
2358 */
2359int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2360{
2361        /* is another pf loaded on this engine? */
2362        if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2363            load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2364                /* build my FW version dword */
2365                u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2366                        (BCM_5710_FW_MINOR_VERSION << 8) +
2367                        (BCM_5710_FW_REVISION_VERSION << 16) +
2368                        (BCM_5710_FW_ENGINEERING_VERSION << 24);
2369
2370                /* read loaded FW from chip */
2371                u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2372
2373                DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2374                   loaded_fw, my_fw);
2375
2376                /* abort nic load if version mismatch */
2377                if (my_fw != loaded_fw) {
2378                        if (print_err)
2379                                BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2380                                          loaded_fw, my_fw);
2381                        else
2382                                BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2383                                               loaded_fw, my_fw);
2384                        return -EBUSY;
2385                }
2386        }
2387        return 0;
2388}
2389
2390/* returns the "mcp load_code" according to global load_count array */
2391static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2392{
2393        int path = BP_PATH(bp);
2394
2395        DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2396           path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2397           bnx2x_load_count[path][2]);
2398        bnx2x_load_count[path][0]++;
2399        bnx2x_load_count[path][1 + port]++;
2400        DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2401           path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2402           bnx2x_load_count[path][2]);
2403        if (bnx2x_load_count[path][0] == 1)
2404                return FW_MSG_CODE_DRV_LOAD_COMMON;
2405        else if (bnx2x_load_count[path][1 + port] == 1)
2406                return FW_MSG_CODE_DRV_LOAD_PORT;
2407        else
2408                return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2409}
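
/*
 * Example of the no-MCP load accounting above: the first function to load
 * on a path gets FW_MSG_CODE_DRV_LOAD_COMMON, the first function on each
 * port thereafter gets FW_MSG_CODE_DRV_LOAD_PORT, and any further function
 * on an already-initialized port gets FW_MSG_CODE_DRV_LOAD_FUNCTION.
 */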
2410
2411/* mark PMF if applicable */
2412static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2413{
2414        if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2415            (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2416            (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2417                bp->port.pmf = 1;
2418                /* We need the barrier to ensure the ordering between the
2419                 * writing to bp->port.pmf here and reading it from the
2420                 * bnx2x_periodic_task().
2421                 */
2422                smp_mb();
2423        } else {
2424                bp->port.pmf = 0;
2425        }
2426
2427        DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2428}
2429
2430static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2431{
2432        if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2433             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2434            (bp->common.shmem2_base)) {
2435                if (SHMEM2_HAS(bp, dcc_support))
2436                        SHMEM2_WR(bp, dcc_support,
2437                                  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2438                                   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2439                if (SHMEM2_HAS(bp, afex_driver_support))
2440                        SHMEM2_WR(bp, afex_driver_support,
2441                                  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2442        }
2443
2444        /* Set AFEX default VLAN tag to an invalid value */
2445        bp->afex_def_vlan_tag = -1;
2446}
2447
2448/**
2449 * bnx2x_bz_fp - zero content of the fastpath structure.
2450 *
2451 * @bp:         driver handle
2452 * @index:      fastpath index to be zeroed
2453 *
2454 * Makes sure the contents of bp->fp[index].napi are kept
2455 * intact.
2456 */
2457static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2458{
2459        struct bnx2x_fastpath *fp = &bp->fp[index];
2460        int cos;
2461        struct napi_struct orig_napi = fp->napi;
2462        struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2463
2464        /* bzero bnx2x_fastpath contents */
2465        if (fp->tpa_info)
2466                memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2467                       sizeof(struct bnx2x_agg_info));
2468        memset(fp, 0, sizeof(*fp));
2469
2470        /* Restore the NAPI object as it has been already initialized */
2471        fp->napi = orig_napi;
2472        fp->tpa_info = orig_tpa_info;
2473        fp->bp = bp;
2474        fp->index = index;
2475        if (IS_ETH_FP(fp))
2476                fp->max_cos = bp->max_cos;
2477        else
2478                /* Special queues support only one CoS */
2479                fp->max_cos = 1;
2480
2481        /* Init txdata pointers */
2482        if (IS_FCOE_FP(fp))
2483                fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2484        if (IS_ETH_FP(fp))
2485                for_each_cos_in_tx_queue(fp, cos)
2486                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2487                                BNX2X_NUM_ETH_QUEUES(bp) + index];
2488
2489        /* Set the TPA flag for each queue. The TPA flag determines the minimal
2490         * queue size, so it must be set prior to queue memory allocation.
2491         */
2492        if (bp->dev->features & NETIF_F_LRO)
2493                fp->mode = TPA_MODE_LRO;
2494        else if (bp->dev->features & NETIF_F_GRO_HW)
2495                fp->mode = TPA_MODE_GRO;
2496        else
2497                fp->mode = TPA_MODE_DISABLED;
2498
2499        /* We don't want TPA if it's disabled in bp
2500         * or if this is an FCoE L2 ring.
2501         */
2502        if (bp->disable_tpa || IS_FCOE_FP(fp))
2503                fp->mode = TPA_MODE_DISABLED;
2504}
2505
2506void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2507{
2508        u32 cur;
2509
2510        if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2511                return;
2512
2513        cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2514        DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2515           cur, state);
2516
2517        SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2518}
2519
2520int bnx2x_load_cnic(struct bnx2x *bp)
2521{
2522        int i, rc, port = BP_PORT(bp);
2523
2524        DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2525
2526        mutex_init(&bp->cnic_mutex);
2527
2528        if (IS_PF(bp)) {
2529                rc = bnx2x_alloc_mem_cnic(bp);
2530                if (rc) {
2531                        BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2532                        LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2533                }
2534        }
2535
2536        rc = bnx2x_alloc_fp_mem_cnic(bp);
2537        if (rc) {
2538                BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2539                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2540        }
2541
2542        /* Update the number of queues with the cnic queues */
2543        rc = bnx2x_set_real_num_queues(bp, 1);
2544        if (rc) {
2545                BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2546                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2547        }
2548
2549        /* Add all CNIC NAPI objects */
2550        bnx2x_add_all_napi_cnic(bp);
2551        DP(NETIF_MSG_IFUP, "cnic napi added\n");
2552        bnx2x_napi_enable_cnic(bp);
2553
2554        rc = bnx2x_init_hw_func_cnic(bp);
2555        if (rc)
2556                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2557
2558        bnx2x_nic_init_cnic(bp);
2559
2560        if (IS_PF(bp)) {
2561                /* Enable Timer scan */
2562                REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2563
2564                /* setup cnic queues */
2565                for_each_cnic_queue(bp, i) {
2566                        rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2567                        if (rc) {
2568                                BNX2X_ERR("Queue setup failed\n");
2569                                LOAD_ERROR_EXIT(bp, load_error_cnic2);
2570                        }
2571                }
2572        }
2573
2574        /* Initialize Rx filter. */
2575        bnx2x_set_rx_mode_inner(bp);
2576
2577        /* re-read iscsi info */
2578        bnx2x_get_iscsi_info(bp);
2579        bnx2x_setup_cnic_irq_info(bp);
2580        bnx2x_setup_cnic_info(bp);
2581        bp->cnic_loaded = true;
2582        if (bp->state == BNX2X_STATE_OPEN)
2583                bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2584
2585        DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2586
2587        return 0;
2588
2589#ifndef BNX2X_STOP_ON_ERROR
2590load_error_cnic2:
2591        /* Disable Timer scan */
2592        REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2593
2594load_error_cnic1:
2595        bnx2x_napi_disable_cnic(bp);
2596        /* Update the number of queues without the cnic queues */
2597        if (bnx2x_set_real_num_queues(bp, 0))
2598                BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2599load_error_cnic0:
2600        BNX2X_ERR("CNIC-related load failed\n");
2601        bnx2x_free_fp_mem_cnic(bp);
2602        bnx2x_free_mem_cnic(bp);
2603        return rc;
2604#endif /* ! BNX2X_STOP_ON_ERROR */
2605}
2606
2607/* must be called with rtnl_lock */
2608int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2609{
2610        int port = BP_PORT(bp);
2611        int i, rc = 0, load_code = 0;
2612
2613        DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2614        DP(NETIF_MSG_IFUP,
2615           "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2616
2617#ifdef BNX2X_STOP_ON_ERROR
2618        if (unlikely(bp->panic)) {
2619                BNX2X_ERR("Can't load NIC when there is panic\n");
2620                return -EPERM;
2621        }
2622#endif
2623
2624        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2625
2626        /* zero the structure w/o any lock, before SP handler is initialized */
2627        memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2628        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2629                &bp->last_reported_link.link_report_flags);
2630
2631        if (IS_PF(bp))
2632                /* must be called before memory allocation and HW init */
2633                bnx2x_ilt_set_info(bp);
2634
2635        /*
2636         * Zero the fastpath structures while preserving invariants that are
2637         * set up only once: napi, fp index, max_cos and the bp pointer.
2638         * Also set fp->mode and txdata_ptr.
2639         */
2640        DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2641        for_each_queue(bp, i)
2642                bnx2x_bz_fp(bp, i);
2643        memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2644                                  bp->num_cnic_queues) *
2645                                  sizeof(struct bnx2x_fp_txdata));
2646
2647        bp->fcoe_init = false;
2648
2649        /* Set the receive queues buffer size */
2650        bnx2x_set_rx_buf_size(bp);
2651
2652        if (IS_PF(bp)) {
2653                rc = bnx2x_alloc_mem(bp);
2654                if (rc) {
2655                        BNX2X_ERR("Unable to allocate bp memory\n");
2656                        return rc;
2657                }
2658        }
2659
2660        /* Needs to be done after alloc mem, since it self-adjusts to the amount
2661         * of memory available for RSS queues.
2662         */
2663        rc = bnx2x_alloc_fp_mem(bp);
2664        if (rc) {
2665                BNX2X_ERR("Unable to allocate memory for fps\n");
2666                LOAD_ERROR_EXIT(bp, load_error0);
2667        }
2668
2669        /* Allocate memory for FW statistics */
2670        if (bnx2x_alloc_fw_stats_mem(bp))
2671                LOAD_ERROR_EXIT(bp, load_error0);
2672
2673        /* request pf to initialize status blocks */
2674        if (IS_VF(bp)) {
2675                rc = bnx2x_vfpf_init(bp);
2676                if (rc)
2677                        LOAD_ERROR_EXIT(bp, load_error0);
2678        }
2679
2680        /* Since bnx2x_alloc_mem() may update bp->num_queues,
2681         * bnx2x_set_real_num_queues() should always come after it.
2682         * At this stage cnic queues are not counted.
2683         */
2684        rc = bnx2x_set_real_num_queues(bp, 0);
2685        if (rc) {
2686                BNX2X_ERR("Unable to set real_num_queues\n");
2687                LOAD_ERROR_EXIT(bp, load_error0);
2688        }
2689
2690        /* Configure multi-CoS mappings in the kernel.
2691         * This configuration may be overridden by a multi-class queue
2692         * discipline or by a DCBX negotiation result.
2693         */
2694        bnx2x_setup_tc(bp->dev, bp->max_cos);
2695
2696        /* Add all NAPI objects */
2697        bnx2x_add_all_napi(bp);
2698        DP(NETIF_MSG_IFUP, "napi added\n");
2699        bnx2x_napi_enable(bp);
2700
2701        if (IS_PF(bp)) {
2702                /* set pf load just before approaching the MCP */
2703                bnx2x_set_pf_load(bp);
2704
2705                /* if mcp exists send load request and analyze response */
2706                if (!BP_NOMCP(bp)) {
2707                        /* attempt to load pf */
2708                        rc = bnx2x_nic_load_request(bp, &load_code);
2709                        if (rc)
2710                                LOAD_ERROR_EXIT(bp, load_error1);
2711
2712                        /* what did mcp say? */
2713                        rc = bnx2x_compare_fw_ver(bp, load_code, true);
2714                        if (rc) {
2715                                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2716                                LOAD_ERROR_EXIT(bp, load_error2);
2717                        }
2718                } else {
2719                        load_code = bnx2x_nic_load_no_mcp(bp, port);
2720                }
2721
2722                /* mark pmf if applicable */
2723                bnx2x_nic_load_pmf(bp, load_code);
2724
2725                /* Init Function state controlling object */
2726                bnx2x__init_func_obj(bp);
2727
2728                /* Initialize HW */
2729                rc = bnx2x_init_hw(bp, load_code);
2730                if (rc) {
2731                        BNX2X_ERR("HW init failed, aborting\n");
2732                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2733                        LOAD_ERROR_EXIT(bp, load_error2);
2734                }
2735        }
2736
2737        bnx2x_pre_irq_nic_init(bp);
2738
2739        /* Connect to IRQs */
2740        rc = bnx2x_setup_irqs(bp);
2741        if (rc) {
2742                BNX2X_ERR("setup irqs failed\n");
2743                if (IS_PF(bp))
2744                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2745                LOAD_ERROR_EXIT(bp, load_error2);
2746        }
2747
2748        /* Init per-function objects */
2749        if (IS_PF(bp)) {
2750                /* Setup NIC internals and enable interrupts */
2751                bnx2x_post_irq_nic_init(bp, load_code);
2752
2753                bnx2x_init_bp_objs(bp);
2754                bnx2x_iov_nic_init(bp);
2755
2756                /* Set AFEX default VLAN tag to an invalid value */
2757                bp->afex_def_vlan_tag = -1;
2758                bnx2x_nic_load_afex_dcc(bp, load_code);
2759                bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2760                rc = bnx2x_func_start(bp);
2761                if (rc) {
2762                        BNX2X_ERR("Function start failed!\n");
2763                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2764
2765                        LOAD_ERROR_EXIT(bp, load_error3);
2766                }
2767
2768                /* Send LOAD_DONE command to MCP */
2769                if (!BP_NOMCP(bp)) {
2770                        load_code = bnx2x_fw_command(bp,
2771                                                     DRV_MSG_CODE_LOAD_DONE, 0);
2772                        if (!load_code) {
2773                                BNX2X_ERR("MCP response failure, aborting\n");
2774                                rc = -EBUSY;
2775                                LOAD_ERROR_EXIT(bp, load_error3);
2776                        }
2777                }
2778
2779                /* initialize FW coalescing state machines in RAM */
2780                bnx2x_update_coalesce(bp);
2781        }
2782
2783        /* setup the leading queue */
2784        rc = bnx2x_setup_leading(bp);
2785        if (rc) {
2786                BNX2X_ERR("Setup leading failed!\n");
2787                LOAD_ERROR_EXIT(bp, load_error3);
2788        }
2789
2790        /* set up the rest of the queues */
2791        for_each_nondefault_eth_queue(bp, i) {
2792                if (IS_PF(bp))
2793                        rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2794                else /* VF */
2795                        rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2796                if (rc) {
2797                        BNX2X_ERR("Queue %d setup failed\n", i);
2798                        LOAD_ERROR_EXIT(bp, load_error3);
2799                }
2800        }
2801
2802        /* setup rss */
2803        rc = bnx2x_init_rss(bp);
2804        if (rc) {
2805                BNX2X_ERR("PF RSS init failed\n");
2806                LOAD_ERROR_EXIT(bp, load_error3);
2807        }
2808
2809        /* Now that the clients are configured we are ready to work */
2810        bp->state = BNX2X_STATE_OPEN;
2811
2812        /* Configure a ucast MAC */
2813        if (IS_PF(bp))
2814                rc = bnx2x_set_eth_mac(bp, true);
2815        else /* vf */
2816                rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2817                                           true);
2818        if (rc) {
2819                BNX2X_ERR("Setting Ethernet MAC failed\n");
2820                LOAD_ERROR_EXIT(bp, load_error3);
2821        }
2822
2823        if (IS_PF(bp) && bp->pending_max) {
2824                bnx2x_update_max_mf_config(bp, bp->pending_max);
2825                bp->pending_max = 0;
2826        }
2827
2828        bp->force_link_down = false;
2829        if (bp->port.pmf) {
2830                rc = bnx2x_initial_phy_init(bp, load_mode);
2831                if (rc)
2832                        LOAD_ERROR_EXIT(bp, load_error3);
2833        }
2834        bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2835
2836        /* Start fast path */
2837
2838        /* Re-configure vlan filters */
2839        rc = bnx2x_vlan_reconfigure_vid(bp);
2840        if (rc)
2841                LOAD_ERROR_EXIT(bp, load_error3);
2842
2843        /* Initialize Rx filter. */
2844        bnx2x_set_rx_mode_inner(bp);
2845
2846        if (bp->flags & PTP_SUPPORTED) {
2847                bnx2x_register_phc(bp);
2848                bnx2x_init_ptp(bp);
2849                bnx2x_configure_ptp_filters(bp);
2850        }
2851        /* Start Tx */
2852        switch (load_mode) {
2853        case LOAD_NORMAL:
2854                /* Tx queues should only be re-enabled */
2855                netif_tx_wake_all_queues(bp->dev);
2856                break;
2857
2858        case LOAD_OPEN:
2859                netif_tx_start_all_queues(bp->dev);
2860                smp_mb__after_atomic();
2861                break;
2862
2863        case LOAD_DIAG:
2864        case LOAD_LOOPBACK_EXT:
2865                bp->state = BNX2X_STATE_DIAG;
2866                break;
2867
2868        default:
2869                break;
2870        }
2871
2872        if (bp->port.pmf)
2873                bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2874        else
2875                bnx2x__link_status_update(bp);
2876
2877        /* start the timer */
2878        mod_timer(&bp->timer, jiffies + bp->current_interval);
2879
2880        if (CNIC_ENABLED(bp))
2881                bnx2x_load_cnic(bp);
2882
2883        if (IS_PF(bp))
2884                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2885
2886        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2887                /* mark driver is loaded in shmem2 */
2888                u32 val;
2889                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2890                val &= ~DRV_FLAGS_MTU_MASK;
2891                val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2892                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2893                          val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2894                          DRV_FLAGS_CAPABILITIES_LOADED_L2);
2895        }
2896
2897        /* Wait for all pending SP commands to complete */
2898        if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2899                BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2900                bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2901                return -EBUSY;
2902        }
2903
2904        /* Update driver data for On-Chip MFW dump. */
2905        if (IS_PF(bp))
2906                bnx2x_update_mfw_dump(bp);
2907
2908        /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2909        if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2910                bnx2x_dcbx_init(bp, false);
2911
2912        if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2913                bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2914
2915        DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2916
2917        return 0;
2918
2919#ifndef BNX2X_STOP_ON_ERROR
2920load_error3:
2921        if (IS_PF(bp)) {
2922                bnx2x_int_disable_sync(bp, 1);
2923
2924                /* Clean queueable objects */
2925                bnx2x_squeeze_objects(bp);
2926        }
2927
2928        /* Free SKBs, SGEs, TPA pool and driver internals */
2929        bnx2x_free_skbs(bp);
2930        for_each_rx_queue(bp, i)
2931                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2932
2933        /* Release IRQs */
2934        bnx2x_free_irq(bp);
2935load_error2:
2936        if (IS_PF(bp) && !BP_NOMCP(bp)) {
2937                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2938                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2939        }
2940
2941        bp->port.pmf = 0;
2942load_error1:
2943        bnx2x_napi_disable(bp);
2944        bnx2x_del_all_napi(bp);
2945
2946        /* clear pf_load status, as it was already set */
2947        if (IS_PF(bp))
2948                bnx2x_clear_pf_load(bp);
2949load_error0:
2950        bnx2x_free_fw_stats_mem(bp);
2951        bnx2x_free_fp_mem(bp);
2952        bnx2x_free_mem(bp);
2953
2954        return rc;
2955#endif /* ! BNX2X_STOP_ON_ERROR */
2956}
2957
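    /**
     * bnx2x_drain_tx_queues - wait for the Tx fastpath tasks to complete.
     *
     * @bp:         driver handle
     *
     * Runs bnx2x_clean_tx_queue() for every CoS of every Tx queue and
     * returns a non-zero value if a queue fails to drain.
     */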
2958int bnx2x_drain_tx_queues(struct bnx2x *bp)
2959{
2960        u8 rc = 0, cos, i;
2961
2962        /* Wait until tx fastpath tasks complete */
2963        for_each_tx_queue(bp, i) {
2964                struct bnx2x_fastpath *fp = &bp->fp[i];
2965
2966                for_each_cos_in_tx_queue(fp, cos)
2967                        rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2968                if (rc)
2969                        return rc;
2970        }
2971        return 0;
2972}
2973
2974/* must be called with rtnl_lock */
2975int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2976{
2977        int i;
2978        bool global = false;
2979
2980        DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2981
2982        if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2983                bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2984
2985        /* mark driver is unloaded in shmem2 */
2986        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2987                u32 val;
2988                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2989                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2990                          val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2991        }
2992
2993        if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2994            (bp->state == BNX2X_STATE_CLOSED ||
2995             bp->state == BNX2X_STATE_ERROR)) {
2996                /* We can get here if the driver has been unloaded
2997                 * during parity error recovery and is either waiting for a
2998                 * leader to complete or for other functions to unload and
2999                 * then ifdown has been issued. In this case we want to
3000                 * unload and let the other functions complete the
3001                 * recovery process.
3002                 */
3003                bp->recovery_state = BNX2X_RECOVERY_DONE;
3004                bp->is_leader = 0;
3005                bnx2x_release_leader_lock(bp);
3006                smp_mb();
3007
3008                DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3009                BNX2X_ERR("Can't unload in closed or error state\n");
3010                return -EINVAL;
3011        }
3012
3013        /* Nothing to do during unload if the previous bnx2x_nic_load()
3014         * has not completed successfully - all resources are released.
3015         *
3016         * We can get here only after an unsuccessful ndo_* callback,
3017         * during which the dev->IFF_UP flag is still on.
3018         */
3019        if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3020                return 0;
3021
3022        /* It's important to set bp->state to a value different from
3023         * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3024         * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3025         */
3026        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3027        smp_mb();
3028
3029        /* indicate to VFs that the PF is going down */
3030        bnx2x_iov_channel_down(bp);
3031
3032        if (CNIC_LOADED(bp))
3033                bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3034
3035        /* Stop Tx */
3036        bnx2x_tx_disable(bp);
3037        netdev_reset_tc(bp->dev);
3038
3039        bp->rx_mode = BNX2X_RX_MODE_NONE;
3040
3041        del_timer_sync(&bp->timer);
3042
3043        if (IS_PF(bp) && !BP_NOMCP(bp)) {
3044                /* Set ALWAYS_ALIVE bit in shmem */
3045                bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3046                bnx2x_drv_pulse(bp);
3047                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3048                bnx2x_save_statistics(bp);
3049        }
3050
3051        /* Wait till consumers catch up with producers in all queues.
3052         * If we're recovering, the FW can't write to the host, so there
3053         * is no reason to wait for the queues to complete all Tx.
3054         */
3055        if (unload_mode != UNLOAD_RECOVERY)
3056                bnx2x_drain_tx_queues(bp);
3057
3058        /* If VF, indicate to the PF that this function is going down
3059         * (the PF will delete the sp elements and clear initializations).
3060         */
3061        if (IS_VF(bp)) {
3062                bnx2x_clear_vlan_info(bp);
3063                bnx2x_vfpf_close_vf(bp);
3064        } else if (unload_mode != UNLOAD_RECOVERY) {
3065                /* if this is a normal/close unload need to clean up chip*/
3066                bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3067        } else {
3068                /* Send the UNLOAD_REQUEST to the MCP */
3069                bnx2x_send_unload_req(bp, unload_mode);
3070
3071                /* Prevent transactions to the host from the functions on the
3072                 * engine that doesn't reset global blocks in case of global
3073                 * attention once global blocks are reset and gates are opened
3074                 * (i.e. the engine whose leader will perform the recovery
3075                 * last).
3076                 */
3077                if (!CHIP_IS_E1x(bp))
3078                        bnx2x_pf_disable(bp);
3079
3080                /* Disable HW interrupts, NAPI */
3081                bnx2x_netif_stop(bp, 1);
3082                /* Delete all NAPI objects */
3083                bnx2x_del_all_napi(bp);
3084                if (CNIC_LOADED(bp))
3085                        bnx2x_del_all_napi_cnic(bp);
3086                /* Release IRQs */
3087                bnx2x_free_irq(bp);
3088
3089                /* Report UNLOAD_DONE to MCP */
3090                bnx2x_send_unload_done(bp, false);
3091        }
3092
3093        /*
3094         * At this stage no more interrupts will arrive so we may safely clean
3095         * the queueable objects here in case they failed to get cleaned so far.
3096         */
3097        if (IS_PF(bp))
3098                bnx2x_squeeze_objects(bp);
3099
3100        /* There should be no more pending SP commands at this stage */
3101        bp->sp_state = 0;
3102
3103        bp->port.pmf = 0;
3104
3105        /* clear pending work in rtnl task */
3106        bp->sp_rtnl_state = 0;
3107        smp_mb();
3108
3109        /* Free SKBs, SGEs, TPA pool and driver internals */
3110        bnx2x_free_skbs(bp);
3111        if (CNIC_LOADED(bp))
3112                bnx2x_free_skbs_cnic(bp);
3113        for_each_rx_queue(bp, i)
3114                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3115
3116        bnx2x_free_fp_mem(bp);
3117        if (CNIC_LOADED(bp))
3118                bnx2x_free_fp_mem_cnic(bp);
3119
3120        if (IS_PF(bp)) {
3121                if (CNIC_LOADED(bp))
3122                        bnx2x_free_mem_cnic(bp);
3123        }
3124        bnx2x_free_mem(bp);
3125
3126        bp->state = BNX2X_STATE_CLOSED;
3127        bp->cnic_loaded = false;
3128
3129        /* Clear driver version indication in shmem */
3130        if (IS_PF(bp) && !BP_NOMCP(bp))
3131                bnx2x_update_mng_version(bp);
3132
3133        /* Check if there are pending parity attentions. If there are - set
3134         * RECOVERY_IN_PROGRESS.
3135         */
3136        if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3137                bnx2x_set_reset_in_progress(bp);
3138
3139                /* Set RESET_IS_GLOBAL if needed */
3140                if (global)
3141                        bnx2x_set_reset_global(bp);
3142        }
3143
3144        /* The last driver must disable the "close the gate" functionality
3145         * if there is no parity attention or "process kill" pending.
3146         */
3147        if (IS_PF(bp) &&
3148            !bnx2x_clear_pf_load(bp) &&
3149            bnx2x_reset_is_done(bp, BP_PATH(bp)))
3150                bnx2x_disable_close_the_gate(bp);
3151
3152        DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3153
3154        return 0;
3155}
3156
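    /**
     * bnx2x_set_power_state - move the device into a PCI power state.
     *
     * @bp:         driver handle
     * @state:      target state; only PCI_D0 and PCI_D3hot are handled
     *
     * Programs PCI_PM_CTRL directly. The D3hot transition is skipped when
     * other clients still hold the device enabled or on emulation/FPGA
     * platforms (CHIP_REV_IS_SLOW).
     */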
3157int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3158{
3159        u16 pmcsr;
3160
3161        /* If there is no power capability, silently succeed */
3162        if (!bp->pdev->pm_cap) {
3163                BNX2X_DEV_INFO("No power capability. Breaking.\n");
3164                return 0;
3165        }
3166
3167        pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3168
3169        switch (state) {
3170        case PCI_D0:
3171                pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3172                                      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3173                                       PCI_PM_CTRL_PME_STATUS));
3174
3175                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3176                        /* delay required during transition out of D3hot */
3177                        msleep(20);
3178                break;
3179
3180        case PCI_D3hot:
3181                /* If there are other clients above, don't
3182                 * shut down the power */
3183                if (atomic_read(&bp->pdev->enable_cnt) != 1)
3184                        return 0;
3185                /* Don't shut down the power for emulation and FPGA */
3186                if (CHIP_REV_IS_SLOW(bp))
3187                        return 0;
3188
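                    /* Request D3hot: the value 3 written to the PCI_PM_CTRL
                     * power state field (PCI_PM_CTRL_STATE_MASK) selects the
                     * D3hot state.
                     */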
3189                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3190                pmcsr |= 3;
3191
3192                if (bp->wol)
3193                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3194
3195                pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3196                                      pmcsr);
3197
3198                /* No more memory access after this point until
3199                 * the device is brought back to D0.
3200                 */
3201                break;
3202
3203        default:
3204                dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3205                return -EINVAL;
3206        }
3207        return 0;
3208}
3209
3210/*
3211 * net_device service functions
3212 */
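    /**
     * bnx2x_poll - NAPI poll callback for a fastpath ring.
     *
     * @napi:       napi struct embedded in the fastpath
     * @budget:     maximum number of Rx packets to process
     *
     * Services Tx completions for every CoS of the fastpath, then handles
     * up to @budget Rx packets. The IGU status block interrupt is re-armed
     * only once no Rx/Tx work remains and napi_complete_done() succeeds.
     */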
3213static int bnx2x_poll(struct napi_struct *napi, int budget)
3214{
3215        struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3216                                                 napi);
3217        struct bnx2x *bp = fp->bp;
3218        int rx_work_done;
3219        u8 cos;
3220
3221#ifdef BNX2X_STOP_ON_ERROR
3222        if (unlikely(bp->panic)) {
3223                napi_complete(napi);
3224                return 0;
3225        }
3226#endif
3227        for_each_cos_in_tx_queue(fp, cos)
3228                if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3229                        bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3230
3231        rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3232
3233        if (rx_work_done < budget) {
3234                /* No need to update SB for FCoE L2 ring as long as
3235                 * it's connected to the default SB and the SB
3236                 * has been updated when NAPI was scheduled.
3237                 */
3238                if (IS_FCOE_FP(fp)) {
3239                        napi_complete_done(napi, rx_work_done);
3240                } else {
3241                        bnx2x_update_fpsb_idx(fp);
3242                        /* bnx2x_has_rx_work() reads the status block,
3243                         * thus we need to ensure that status block indices
3244                         * have been actually read (bnx2x_update_fpsb_idx)
3245                         * prior to this check (bnx2x_has_rx_work) so that
3246                         * we won't write the "newer" value of the status block
3247                         * to IGU (if there was a DMA right after
3248                         * bnx2x_has_rx_work and if there is no rmb, the memory
3249                         * reading (bnx2x_update_fpsb_idx) may be postponed
3250                         * to right before bnx2x_ack_sb). In this case there
3251                         * will never be another interrupt until there is
3252                         * another update of the status block, while there
3253                         * is still unhandled work.
3254                         */
3255                        rmb();
3256
3257                        if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3258                                if (napi_complete_done(napi, rx_work_done)) {
3259                                        /* Re-enable interrupts */
3260                                        DP(NETIF_MSG_RX_STATUS,
3261                                           "Update index to %d\n", fp->fp_hc_idx);
3262                                        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3263                                                     le16_to_cpu(fp->fp_hc_idx),
3264                                                     IGU_INT_ENABLE, 1);
3265                                }
3266                        } else {
3267                                rx_work_done = budget;
3268                        }
3269                }
3270        }
3271
3272        return rx_work_done;
3273}
3274
3275/* We split the first BD into header and data BDs
3276 * to ease the pain of our fellow microcode engineers;
3277 * we use one mapping for both BDs.
3278 */
3279static u16 bnx2x_tx_split(struct bnx2x *bp,
3280                          struct bnx2x_fp_txdata *txdata,
3281                          struct sw_tx_bd *tx_buf,
3282                          struct eth_tx_start_bd **tx_bd, u16 hlen,
3283                          u16 bd_prod)
3284{
3285        struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3286        struct eth_tx_bd *d_tx_bd;
3287        dma_addr_t mapping;
3288        int old_len = le16_to_cpu(h_tx_bd->nbytes);
3289
3290        /* first fix first BD */
3291        h_tx_bd->nbytes = cpu_to_le16(hlen);
3292
3293        DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3294           h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3295
3296        /* now get a new data BD
3297         * (after the pbd) and fill it */
3298        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3299        d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3300
3301        mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3302                           le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3303
3304        d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3305        d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3306        d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3307
3308        /* this marks the BD as one that has no individual mapping */
3309        tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3310
3311        DP(NETIF_MSG_TX_QUEUED,
3312           "TSO split data size is %d (%x:%x)\n",
3313           d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3314
3315        /* update tx_bd */
3316        *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3317
3318        return bd_prod;
3319}
3320
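    /* Helpers that byte-swap a value while preserving the sparse __le
     * annotations (note the __force casts); used below for parsing-BD
     * fields that are stored byte-swapped for the FW.
     */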
3321#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3322#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3323static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3324{
3325        __sum16 tsum = (__force __sum16) csum;
3326
3327        if (fix > 0)
3328                tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3329                                  csum_partial(t_header - fix, fix, 0)));
3330
3331        else if (fix < 0)
3332                tsum = ~csum_fold(csum_add((__force __wsum) csum,
3333                                  csum_partial(t_header, -fix, 0)));
3334
3335        return bswab16(tsum);
3336}
3337
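    /**
     * bnx2x_xmit_type - compose the XMIT_* flags describing an skb.
     *
     * @bp:         driver handle
     * @skb:        packet skb
     *
     * Worked example derived from the checks below (assuming XMIT_CSUM_ENC
     * covers the ENC_V4/V6 bits): on a non-E1x chip, a GSO TCP/IPv4 packet
     * carried in an IPv4 UDP tunnel yields XMIT_CSUM_V4 | XMIT_CSUM_ENC_V4 |
     * XMIT_CSUM_TCP | XMIT_GSO_V4 | XMIT_GSO_ENC_V4.
     */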
3338static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3339{
3340        u32 rc;
3341        __u8 prot = 0;
3342        __be16 protocol;
3343
3344        if (skb->ip_summed != CHECKSUM_PARTIAL)
3345                return XMIT_PLAIN;
3346
3347        protocol = vlan_get_protocol(skb);
3348        if (protocol == htons(ETH_P_IPV6)) {
3349                rc = XMIT_CSUM_V6;
3350                prot = ipv6_hdr(skb)->nexthdr;
3351        } else {
3352                rc = XMIT_CSUM_V4;
3353                prot = ip_hdr(skb)->protocol;
3354        }
3355
3356        if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3357                if (inner_ip_hdr(skb)->version == 6) {
3358                        rc |= XMIT_CSUM_ENC_V6;
3359                        if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3360                                rc |= XMIT_CSUM_TCP;
3361                } else {
3362                        rc |= XMIT_CSUM_ENC_V4;
3363                        if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3364                                rc |= XMIT_CSUM_TCP;
3365                }
3366        }
3367        if (prot == IPPROTO_TCP)
3368                rc |= XMIT_CSUM_TCP;
3369
3370        if (skb_is_gso(skb)) {
3371                if (skb_is_gso_v6(skb)) {
3372                        rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3373                        if (rc & XMIT_CSUM_ENC)
3374                                rc |= XMIT_GSO_ENC_V6;
3375                } else {
3376                        rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3377                        if (rc & XMIT_CSUM_ENC)
3378                                rc |= XMIT_GSO_ENC_V4;
3379                }
3380        }
3381
3382        return rc;
3383}
3384
3385/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3386#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3387
3388/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3389#define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3390
3391#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3392/* Check if the packet requires linearization (packet is too fragmented).
3393   No need to check fragmentation if page size > 8K (there will be no
3394   violation of FW restrictions) */
3395static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3396                             u32 xmit_type)
3397{
3398        int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3399        int to_copy = 0, hlen = 0;
3400
3401        if (xmit_type & XMIT_GSO_ENC)
3402                num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3403
3404        if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3405                if (xmit_type & XMIT_GSO) {
3406                        unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3407                        int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3408                        /* Number of windows to check */
3409                        int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3410                        int wnd_idx = 0;
3411                        int frag_idx = 0;
3412                        u32 wnd_sum = 0;
3413
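                            /* Slide a window of wnd_size BDs across the
                             * frags; if any window sums to less than one
                             * MSS (lso_mss), the packet would violate the
                             * FW restriction and must be linearized.
                             */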
3414                        /* Headers length */
3415                        if (xmit_type & XMIT_GSO_ENC)
3416                                hlen = (int)(skb_inner_transport_header(skb) -
3417                                             skb->data) +
3418                                             inner_tcp_hdrlen(skb);
3419                        else
3420                                hlen = (int)(skb_transport_header(skb) -
3421                                             skb->data) + tcp_hdrlen(skb);
3422
3423                        /* Amount of data (w/o headers) on the linear part of the SKB */
3424                        first_bd_sz = skb_headlen(skb) - hlen;
3425
3426                        wnd_sum  = first_bd_sz;
3427
3428                        /* Calculate the first sum - it's special */
3429                        for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3430                                wnd_sum +=
3431                                        skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3432
3433                        /* If there was data in the linear part of the skb - check it */
3434                        if (first_bd_sz > 0) {
3435                                if (unlikely(wnd_sum < lso_mss)) {
3436                                        to_copy = 1;
3437                                        goto exit_lbl;
3438                                }
3439
3440                                wnd_sum -= first_bd_sz;
3441                        }
3442
3443                        /* Others are easier: run through the frag list and
3444                           check all windows */
3445                        for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3446                                wnd_sum +=
3447                          skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3448
3449                                if (unlikely(wnd_sum < lso_mss)) {
3450                                        to_copy = 1;
3451                                        break;
3452                                }
3453                                wnd_sum -=
3454                                        skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3455                        }
3456                } else {
3457                        /* in the non-LSO case a too fragmented packet
3458                           should always be linearized */
3459                        to_copy = 1;
3460                }
3461        }
3462
3463exit_lbl:
3464        if (unlikely(to_copy))
3465                DP(NETIF_MSG_TX_QUEUED,
3466                   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3467                   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3468                   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3469
3470        return to_copy;
3471}
3472#endif
3473
3474/**
3475 * bnx2x_set_pbd_gso - update PBD in GSO case.
3476 *
3477 * @skb:        packet skb
3478 * @pbd:        parse BD
3479 * @xmit_type:  xmit flags
3480 */
3481static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3482                              struct eth_tx_parse_bd_e1x *pbd,
3483                              u32 xmit_type)
3484{
3485        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3486        pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3487        pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3488
3489        if (xmit_type & XMIT_GSO_V4) {
3490                pbd->ip_id = bswab16(ip_hdr(skb)->id);
3491                pbd->tcp_pseudo_csum =
3492                        bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3493                                                   ip_hdr(skb)->daddr,
3494                                                   0, IPPROTO_TCP, 0));
3495        } else {
3496                pbd->tcp_pseudo_csum =
3497                        bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3498                                                 &ipv6_hdr(skb)->daddr,
3499                                                 0, IPPROTO_TCP, 0));
3500        }
3501
3502        pbd->global_data |=
3503                cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3504}
3505
3506/**
3507 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3508 *
3509 * @bp:                 driver handle
3510 * @skb:                packet skb
3511 * @parsing_data:       data to be updated
3512 * @xmit_type:          xmit flags
3513 *
3514 * 57712/578xx related, when skb has encapsulation
3515 */
3516static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3517                                 u32 *parsing_data, u32 xmit_type)
3518{
3519        *parsing_data |=
3520                ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3521                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3522                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3523
3524        if (xmit_type & XMIT_CSUM_TCP) {
3525                *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3526                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3527                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3528
3529                return skb_inner_transport_header(skb) +
3530                        inner_tcp_hdrlen(skb) - skb->data;
3531        }
3532
3533        /* We support checksum offload for TCP and UDP only.
3534         * No need to pass the UDP header length - it's a constant.
3535         */
3536        return skb_inner_transport_header(skb) +
3537                sizeof(struct udphdr) - skb->data;
3538}
3539
3540/**
3541 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3542 *
3543 * @bp:                 driver handle
3544 * @skb:                packet skb
3545 * @parsing_data:       data to be updated
3546 * @xmit_type:          xmit flags
3547 *
3548 * 57712/578xx related
3549 */
3550static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3551                                u32 *parsing_data, u32 xmit_type)
3552{
3553        *parsing_data |=
3554                ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3555                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3556                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3557
3558        if (xmit_type & XMIT_CSUM_TCP) {
3559                *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3560                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3561                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3562
3563                return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3564        }
3565        /* We support checksum offload for TCP and UDP only.
3566         * No need to pass the UDP header length - it's a constant.
3567         */
3568        return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3569}
3570
3571/* set FW indication according to inner or outer protocols if tunneled */
3572static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3573                               struct eth_tx_start_bd *tx_start_bd,
3574                               u32 xmit_type)
3575{
3576        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3577
3578        if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3579                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3580
3581        if (!(xmit_type & XMIT_CSUM_TCP))
3582                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3583}
3584
3585/**
3586 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3587 *
3588 * @bp:         driver handle
3589 * @skb:        packet skb
3590 * @pbd:        parse BD to be updated
3591 * @xmit_type:  xmit flags
3592 */
3593static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3594                             struct eth_tx_parse_bd_e1x *pbd,
3595                             u32 xmit_type)
3596{
3597        u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3598
3599        /* for now NS flag is not used in Linux */
3600        pbd->global_data =
3601                cpu_to_le16(hlen |
3602                            ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3603                             ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3604
3605        pbd->ip_hlen_w = (skb_transport_header(skb) -
3606                        skb_network_header(skb)) >> 1;
3607
3608        hlen += pbd->ip_hlen_w;
3609
3610        /* We support checksum offload for TCP and UDP only */
3611        if (xmit_type & XMIT_CSUM_TCP)
3612                hlen += tcp_hdrlen(skb) / 2;
3613        else
3614                hlen += sizeof(struct udphdr) / 2;
3615
3616        pbd->total_hlen_w = cpu_to_le16(hlen);
3617        hlen = hlen*2;
3618
3619        if (xmit_type & XMIT_CSUM_TCP) {
3620                pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3621
3622        } else {
3623                s8 fix = SKB_CS_OFF(skb); /* signed! */
3624
3625                DP(NETIF_MSG_TX_QUEUED,
3626                   "hlen %d  fix %d  csum before fix %x\n",
3627                   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3628
3629                /* HW bug: fixup the CSUM */
3630                pbd->tcp_pseudo_csum =
3631                        bnx2x_csum_fix(skb_transport_header(skb),
3632                                       SKB_CS(skb), fix);
3633
3634                DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3635                   pbd->tcp_pseudo_csum);
3636        }
3637
3638        return hlen;
3639}
3640
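    /**
     * bnx2x_update_pbds_gso_enc - fill parsing BDs for a tunneled GSO packet.
     *
     * @skb:                packet skb
     * @pbd_e2:             parse BD (E2 and newer chips)
     * @pbd2:               second parse BD
     * @global_data:        global data of the second parse BD, to be updated
     * @xmit_type:          xmit flags
     *
     * Records the outer IP header info (length and checksum without the
     * len/flags/frag fields for IPv4, or the IPV6_OUTER flag), the inner
     * IP id and TCP pseudo checksum, and the header offsets used by the
     * FW for the tunneled LSO flow.
     */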
3641static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3642                                      struct eth_tx_parse_bd_e2 *pbd_e2,
3643                                      struct eth_tx_parse_2nd_bd *pbd2,
3644                                      u16 *global_data,
3645                                      u32 xmit_type)
3646{
3647        u16 hlen_w = 0;
3648        u8 outerip_off, outerip_len = 0;
3649
3650        /* from outer IP to transport */
3651        hlen_w = (skb_inner_transport_header(skb) -
3652                  skb_network_header(skb)) >> 1;
3653
3654        /* transport len */
3655        hlen_w += inner_tcp_hdrlen(skb) >> 1;
3656
3657        pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3658
3659        /* outer IP header info */
3660        if (xmit_type & XMIT_CSUM_V4) {
3661                struct iphdr *iph = ip_hdr(skb);
3662                u32 csum = (__force u32)(~iph->check) -
3663                           (__force u32)iph->tot_len -
3664                           (__force u32)iph->frag_off;
3665
3666                outerip_len = iph->ihl << 1;
3667
3668                pbd2->fw_ip_csum_wo_len_flags_frag =
3669                        bswab16(csum_fold((__force __wsum)csum));
3670        } else {
3671                pbd2->fw_ip_hdr_to_payload_w =
3672                        hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3673                pbd_e2->data.tunnel_data.flags |=
3674                        ETH_TUNNEL_DATA_IPV6_OUTER;
3675        }
3676
3677        pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3678
3679        pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3680
3681        /* inner IP header info */
3682        if (xmit_type & XMIT_CSUM_ENC_V4) {
3683                pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3684
3685                pbd_e2->data.tunnel_data.pseudo_csum =
3686                        bswab16(~csum_tcpudp_magic(
3687                                        inner_ip_hdr(skb)->saddr,
3688                                        inner_ip_hdr(skb)->daddr,
3689                                        0, IPPROTO_TCP, 0));
3690        } else {
3691                pbd_e2->data.tunnel_data.pseudo_csum =
3692                        bswab16(~csum_ipv6_magic(
3693                                        &inner_ipv6_hdr(skb)->saddr,
3694                                        &inner_ipv6_hdr(skb)->daddr,
3695                                        0, IPPROTO_TCP, 0));
3696        }
3697
3698        outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3699
3700        *global_data |=
3701                outerip_off |
3702                (outerip_len <<
3703                        ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3704                ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3705                        ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3706
3707        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3708                SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3709                pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3710        }
3711}
3712
3713static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3714                                         u32 xmit_type)
3715{
3716        struct ipv6hdr *ipv6;
3717
3718        if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3719                return;
3720
3721        if (xmit_type & XMIT_GSO_ENC_V6)
3722                ipv6 = inner_ipv6_hdr(skb);
3723        else /* XMIT_GSO_V6 */
3724                ipv6 = ipv6_hdr(skb);
3725
3726        if (ipv6->nexthdr == NEXTHDR_IPV6)
3727                *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3728}
3729
3730/* Called with netif_tx_lock held.
3731 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3732 * netif_wake_queue().
3733 */
3734netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3735{
3736        struct bnx2x *bp = netdev_priv(dev);
3737
3738        struct netdev_queue *txq;
3739        struct bnx2x_fp_txdata *txdata;
3740        struct sw_tx_bd *tx_buf;
3741        struct eth_tx_start_bd *tx_start_bd, *first_bd;
3742        struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3743        struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3744        struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3745        struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3746        u32 pbd_e2_parsing_data = 0;
3747        u16 pkt_prod, bd_prod;
3748        int nbd, txq_index;
3749        dma_addr_t mapping;
3750        u32 xmit_type = bnx2x_xmit_type(bp, skb);
3751        int i;
3752        u8 hlen = 0;
3753        __le16 pkt_size = 0;
3754        struct ethhdr *eth;
3755        u8 mac_type = UNICAST_ADDRESS;
3756
3757#ifdef BNX2X_STOP_ON_ERROR
3758        if (unlikely(bp->panic))
3759                return NETDEV_TX_BUSY;
3760#endif
3761
3762        txq_index = skb_get_queue_mapping(skb);
3763        txq = netdev_get_tx_queue(dev, txq_index);
3764
3765        BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3766
3767        txdata = &bp->bnx2x_txq[txq_index];
3768
3769        /* enable this debug print to view the transmission queue being used
3770        DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3771           txq_index, fp_index, txdata_index); */
3772
3773        /* enable this debug print to view the transmission details
3774        DP(NETIF_MSG_TX_QUEUED,
3775           "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3776           txdata->cid, fp_index, txdata_index, txdata, fp); */
3777
3778        if (unlikely(bnx2x_tx_avail(bp, txdata) <
3779                        skb_shinfo(skb)->nr_frags +
3780                        BDS_PER_TX_PKT +
3781                        NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3782                /* Handle special storage cases separately */
3783                if (txdata->tx_ring_size == 0) {
3784                        struct bnx2x_eth_q_stats *q_stats =
3785                                bnx2x_fp_qstats(bp, txdata->parent_fp);
3786                        q_stats->driver_filtered_tx_pkt++;
3787                        dev_kfree_skb(skb);
3788                        return NETDEV_TX_OK;
3789                }
3790                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3791                netif_tx_stop_queue(txq);
3792                BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3793
3794                return NETDEV_TX_BUSY;
3795        }
3796
3797        DP(NETIF_MSG_TX_QUEUED,
3798           "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3799           txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3800           ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3801           skb->len);
3802
3803        eth = (struct ethhdr *)skb->data;
3804
3805        /* set flag according to packet type (UNICAST_ADDRESS is default) */
3806        if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3807                if (is_broadcast_ether_addr(eth->h_dest))
3808                        mac_type = BROADCAST_ADDRESS;
3809                else
3810                        mac_type = MULTICAST_ADDRESS;
3811        }
3812
3813#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3814        /* First, check if we need to linearize the skb (due to FW
3815           restrictions). No need to check fragmentation if page size > 8K
3816           (there will be no violation of FW restrictions) */
3817        if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3818                /* Statistics of linearization */
3819                bp->lin_cnt++;
3820                if (skb_linearize(skb) != 0) {
3821                        DP(NETIF_MSG_TX_QUEUED,
3822                           "SKB linearization failed - silently dropping this SKB\n");
3823                        dev_kfree_skb_any(skb);
3824                        return NETDEV_TX_OK;
3825                }
3826        }
3827#endif
3828        /* Map skb linear data for DMA */
3829        mapping = dma_map_single(&bp->pdev->dev, skb->data,
3830                                 skb_headlen(skb), DMA_TO_DEVICE);
3831        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3832                DP(NETIF_MSG_TX_QUEUED,
3833                   "SKB mapping failed - silently dropping this SKB\n");
3834                dev_kfree_skb_any(skb);
3835                return NETDEV_TX_OK;
3836        }
3837        /*
3838         * Please read carefully. First we use one BD which we mark as start,
3839         * then we have a parsing info BD (used for TSO or xsum),
3840         * and only then we have the rest of the TSO BDs.
3841         * (don't forget to mark the last one as last,
3842         * and to unmap only AFTER you write to the BD ...)
3843         * And above all, all pbd sizes are in words - NOT DWORDS!
3844         */
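            /* Resulting BD chain (a sketch of the flow below): start BD ->
             * parse BD (e1x or e2) -> 2nd parse BD (encapsulated csum only) ->
             * split data BD (TSO with headlen > hlen only) -> one data BD
             * per frag.
             */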
3845
3846        /* get current pkt produced now - advance it just before sending packet
3847         * since mapping of pages may fail and cause packet to be dropped
3848         */
3849        pkt_prod = txdata->tx_pkt_prod;
3850        bd_prod = TX_BD(txdata->tx_bd_prod);
3851
3852        /* get a tx_buf and first BD
3853         * tx_start_bd may be changed during SPLIT,
3854         * but first_bd will always stay first
3855         */
3856        tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3857        tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3858        first_bd = tx_start_bd;
3859
3860        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3861
3862        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3863                if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3864                        bp->eth_stats.ptp_skip_tx_ts++;
3865                        BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3866                } else if (bp->ptp_tx_skb) {
3867                        bp->eth_stats.ptp_skip_tx_ts++;
3868                        netdev_err_once(bp->dev,
3869                                        "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3870                } else {
3871                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3872                        /* schedule check for Tx timestamp */
3873                        bp->ptp_tx_skb = skb_get(skb);
3874                        bp->ptp_tx_start = jiffies;
3875                        schedule_work(&bp->ptp_task);
3876                }
3877        }
3878
3879        /* header nbd: indirectly zero other flags! */
3880        tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3881
3882        /* remember the first BD of the packet */
3883        tx_buf->first_bd = txdata->tx_bd_prod;
3884        tx_buf->skb = skb;
3885        tx_buf->flags = 0;
3886
3887        DP(NETIF_MSG_TX_QUEUED,
3888           "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3889           pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3890
3891        if (skb_vlan_tag_present(skb)) {
3892                tx_start_bd->vlan_or_ethertype =
3893                    cpu_to_le16(skb_vlan_tag_get(skb));
3894                tx_start_bd->bd_flags.as_bitfield |=
3895                    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3896        } else {
3897                /* when transmitting in a VF, the start BD must hold the
3898                 * ethertype for the FW to enforce it
3899                 */
3900                u16 vlan_tci = 0;
3901#ifndef BNX2X_STOP_ON_ERROR
3902                if (IS_VF(bp)) {
3903#endif
3904                        /* Still need to consider the inband vlan for enforcement */
3905                        if (__vlan_get_tag(skb, &vlan_tci)) {
3906                                tx_start_bd->vlan_or_ethertype =
3907                                        cpu_to_le16(ntohs(eth->h_proto));
3908                        } else {
3909                                tx_start_bd->bd_flags.as_bitfield |=
3910                                        (X_ETH_INBAND_VLAN <<
3911                                         ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3912                                tx_start_bd->vlan_or_ethertype =
3913                                        cpu_to_le16(vlan_tci);
3914                        }
3915#ifndef BNX2X_STOP_ON_ERROR
3916                } else {
3917                        /* used by FW for packet accounting */
3918                        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3919                }
3920#endif
3921        }
3922
3923        nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3924
3925        /* turn on parsing and get a BD */
3926        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3927
3928        if (xmit_type & XMIT_CSUM)
3929                bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3930
3931        if (!CHIP_IS_E1x(bp)) {
3932                pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3933                memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3934
3935                if (xmit_type & XMIT_CSUM_ENC) {
3936                        u16 global_data = 0;
3937
3938                        /* Set PBD in enc checksum offload case */
3939                        hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3940                                                      &pbd_e2_parsing_data,
3941                                                      xmit_type);
3942
3943                        /* turn on 2nd parsing and get a BD */
3944                        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3945
3946                        pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3947
3948                        memset(pbd2, 0, sizeof(*pbd2));
3949
3950                        pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3951                                (skb_inner_network_header(skb) -
3952                                 skb->data) >> 1;
3953
3954                        if (xmit_type & XMIT_GSO_ENC)
3955                                bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3956                                                          &global_data,
3957                                                          xmit_type);
3958
3959                        pbd2->global_data = cpu_to_le16(global_data);
3960
3961                        /* add additional parse BD indication to the start BD */
3962                        SET_FLAG(tx_start_bd->general_data,
3963                                 ETH_TX_START_BD_PARSE_NBDS, 1);
3964                        /* set encapsulation flag in start BD */
3965                        SET_FLAG(tx_start_bd->general_data,
3966                                 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3967
3968                        tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3969
3970                        nbd++;
3971                } else if (xmit_type & XMIT_CSUM) {
3972                        /* Set PBD in checksum offload case w/o encapsulation */
3973                        hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3974                                                     &pbd_e2_parsing_data,
3975                                                     xmit_type);
3976                }
3977
3978                bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3979                /* Add the macs to the parsing BD if this is a vf or if
3980                 * Tx Switching is enabled.
3981                 */
3982                if (IS_VF(bp)) {
3983                        /* override GRE parameters in BD */
3984                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3985                                              &pbd_e2->data.mac_addr.src_mid,
3986                                              &pbd_e2->data.mac_addr.src_lo,
3987                                              eth->h_source);
3988
3989                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3990                                              &pbd_e2->data.mac_addr.dst_mid,
3991                                              &pbd_e2->data.mac_addr.dst_lo,
3992                                              eth->h_dest);
3993                } else {
3994                        if (bp->flags & TX_SWITCHING)
3995                                bnx2x_set_fw_mac_addr(
3996                                                &pbd_e2->data.mac_addr.dst_hi,
3997                                                &pbd_e2->data.mac_addr.dst_mid,
3998                                                &pbd_e2->data.mac_addr.dst_lo,
3999                                                eth->h_dest);
4000#ifdef BNX2X_STOP_ON_ERROR
4001                        /* Enforce security is always set in Stop on Error -
4002                         * source mac should be present in the parsing BD
4003                         */
4004                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4005                                              &pbd_e2->data.mac_addr.src_mid,
4006                                              &pbd_e2->data.mac_addr.src_lo,
4007                                              eth->h_source);
4008#endif
4009                }
4010
4011                SET_FLAG(pbd_e2_parsing_data,
4012                         ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4013        } else {
4014                u16 global_data = 0;
4015                pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4016                memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4017                /* Set PBD in checksum offload case */
4018                if (xmit_type & XMIT_CSUM)
4019                        hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4020
4021                SET_FLAG(global_data,
4022                         ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4023                pbd_e1x->global_data |= cpu_to_le16(global_data);
4024        }
4025
4026        /* Setup the data pointer of the first BD of the packet */
4027        tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4028        tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4029        tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4030        pkt_size = tx_start_bd->nbytes;
4031
4032        DP(NETIF_MSG_TX_QUEUED,
4033           "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4034           tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4035           le16_to_cpu(tx_start_bd->nbytes),
4036           tx_start_bd->bd_flags.as_bitfield,
4037           le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4038
4039        if (xmit_type & XMIT_GSO) {
4040
4041                DP(NETIF_MSG_TX_QUEUED,
4042                   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4043                   skb->len, hlen, skb_headlen(skb),
4044                   skb_shinfo(skb)->gso_size);
4045
4046                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4047
4048                if (unlikely(skb_headlen(skb) > hlen)) {
4049                        nbd++;
4050                        bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4051                                                 &tx_start_bd, hlen,
4052                                                 bd_prod);
4053                }
4054                if (!CHIP_IS_E1x(bp))
4055                        pbd_e2_parsing_data |=
4056                                (skb_shinfo(skb)->gso_size <<
4057                                 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4058                                 ETH_TX_PARSE_BD_E2_LSO_MSS;
4059                else
4060                        bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4061        }
4062
4063        /* Set the PBD's parsing_data field if not zero
4064         * (for the chips newer than 57711).
4065         */
4066        if (pbd_e2_parsing_data)
4067                pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4068
4069        tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4070
4071        /* Handle fragmented skb */
4072        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4073                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4074
4075                mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4076                                           skb_frag_size(frag), DMA_TO_DEVICE);
4077                if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4078                        unsigned int pkts_compl = 0, bytes_compl = 0;
4079
4080                        DP(NETIF_MSG_TX_QUEUED,
4081                           "Unable to map page - dropping packet...\n");
4082
4083                        /* we need to unmap all buffers already mapped
4084                         * for this SKB;
4085                         * first_bd->nbd needs to be properly updated
4086                         * before the call to bnx2x_free_tx_pkt
4087                         */
4088                        first_bd->nbd = cpu_to_le16(nbd);
4089                        bnx2x_free_tx_pkt(bp, txdata,
4090                                          TX_BD(txdata->tx_pkt_prod),
4091                                          &pkts_compl, &bytes_compl);
4092                        return NETDEV_TX_OK;
4093                }
4094
4095                bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4096                tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4097                if (total_pkt_bd == NULL)
4098                        total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4099
4100                tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4101                tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4102                tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4103                le16_add_cpu(&pkt_size, skb_frag_size(frag));
4104                nbd++;
4105
4106                DP(NETIF_MSG_TX_QUEUED,
4107                   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4108                   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4109                   le16_to_cpu(tx_data_bd->nbytes));
4110        }
4111
4112        DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4113
4114        /* update with actual num BDs */
4115        first_bd->nbd = cpu_to_le16(nbd);
4116
4117        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4118
4119        /* now send a tx doorbell, counting the next BD
4120         * if the packet contains or ends with it
4121         */
4122        if (TX_BD_POFF(bd_prod) < nbd)
4123                nbd++;
4124
4125        /* total_pkt_bytes should be set on the first data BD if
4126         * it's not an LSO packet and there is more than one
4127         * data BD. In this case pkt_size is limited by an MTU value.
4128         * However, we prefer to set it for an LSO packet as well (even
4129         * though we don't have to) in order to save some CPU cycles in
4130         * the non-LSO case, which is where we care much more about them.
4131         */
4132        if (total_pkt_bd != NULL)
4133                total_pkt_bd->total_pkt_bytes = pkt_size;
4134
4135        if (pbd_e1x)
4136                DP(NETIF_MSG_TX_QUEUED,
4137                   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4138                   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4139                   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4140                   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4141                    le16_to_cpu(pbd_e1x->total_hlen_w));
4142        if (pbd_e2)
4143                DP(NETIF_MSG_TX_QUEUED,
4144                   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4145                   pbd_e2,
4146                   pbd_e2->data.mac_addr.dst_hi,
4147                   pbd_e2->data.mac_addr.dst_mid,
4148                   pbd_e2->data.mac_addr.dst_lo,
4149                   pbd_e2->data.mac_addr.src_hi,
4150                   pbd_e2->data.mac_addr.src_mid,
4151                   pbd_e2->data.mac_addr.src_lo,
4152                   pbd_e2->parsing_data);
4153        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4154
4155        netdev_tx_sent_queue(txq, skb->len);
4156
4157        skb_tx_timestamp(skb);
4158
4159        txdata->tx_pkt_prod++;
4160        /*
4161         * Make sure that the BD data is updated before updating the producer
4162         * since FW might read the BD right after the producer is updated.
4163         * This is only applicable for weak-ordered memory model archs such
4164         * as IA-64. The following barrier is also mandatory since FW will
4165         * assume packets must have BDs.
4166         */
4167        wmb();
4168
4169        txdata->tx_db.data.prod += nbd;
4170        /* make sure descriptor update is observed by HW */
4171        wmb();
4172
4173        DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4174
4175        txdata->tx_bd_prod += nbd;
4176
4177        if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4178                netif_tx_stop_queue(txq);
4179
4180                /* the paired memory barrier is in bnx2x_tx_int(); we must keep
4181                 * the ordering between set_bit() in netif_tx_stop_queue() and
4182                 * the read of fp->bd_tx_cons */
4183                smp_mb();
4184
4185                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4186                if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4187                        netif_tx_wake_queue(txq);
4188        }
4189        txdata->tx_pkt++;
4190
4191        return NETDEV_TX_OK;
4192}
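
    /* A quick recap of the submission ordering above: the BDs and parsing
     * descriptors are written first, wmb() makes them visible before the
     * doorbell producer field is updated, and a second wmb() orders that
     * update against the DOORBELL_RELAXED() write itself, because the
     * firmware may start fetching BDs as soon as it sees the new producer.
     */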
4193
4194void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4195{
4196        int mfw_vn = BP_FW_MB_IDX(bp);
4197        u32 tmp;
4198
4199        /* If the shmem shouldn't affect configuration, use an identity mapping */
4200        if (!IS_MF_BD(bp)) {
4201                int i;
4202
4203                for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4204                        c2s_map[i] = i;
4205                *c2s_default = 0;
4206
4207                return;
4208        }
4209
4210        tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4211        tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4212        c2s_map[0] = tmp & 0xff;
4213        c2s_map[1] = (tmp >> 8) & 0xff;
4214        c2s_map[2] = (tmp >> 16) & 0xff;
4215        c2s_map[3] = (tmp >> 24) & 0xff;
4216
4217        tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4218        tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4219        c2s_map[4] = tmp & 0xff;
4220        c2s_map[5] = (tmp >> 8) & 0xff;
4221        c2s_map[6] = (tmp >> 16) & 0xff;
4222        c2s_map[7] = (tmp >> 24) & 0xff;
4223
4224        tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4225        tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4226        *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4227}
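
    /* Example with hypothetical shmem contents: if c2s_pcp_map_lower[mfw_vn]
     * reads as 0x03020100 and c2s_pcp_map_upper[mfw_vn] as 0x07060504 after
     * the endianness fix-up, the byte extraction above yields
     * c2s_map[] = {0, 1, 2, 3, 4, 5, 6, 7}, i.e. an identity mapping of the
     * eight PCP values.
     */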
4228
4229/**
4230 * bnx2x_setup_tc - routine to configure net_device for multi tc
4231 *
4232 * @dev: net device to configure
4233 * @num_tc: number of traffic classes to enable
4234 *
4235 * callback connected to the ndo_setup_tc function pointer
4236 */
4237int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4238{
4239        struct bnx2x *bp = netdev_priv(dev);
4240        u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4241        int cos, prio, count, offset;
4242
4243        /* setup tc must be called under rtnl lock */
4244        ASSERT_RTNL();
4245
4246        /* no traffic classes requested - tear down any existing tc config */
4247        if (!num_tc) {
4248                netdev_reset_tc(dev);
4249                return 0;
4250        }
4251
4252        /* requested to support too many traffic classes */
4253        if (num_tc > bp->max_cos) {
4254                BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4255                          num_tc, bp->max_cos);
4256                return -EINVAL;
4257        }
4258
4259        /* declare amount of supported traffic classes */
4260        if (netdev_set_num_tc(dev, num_tc)) {
4261                BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4262                return -EINVAL;
4263        }
4264
4265        bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4266
4267        /* configure priority to traffic class mapping */
4268        for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4269                int outer_prio = c2s_map[prio];
4270
4271                netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4272                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4273                   "mapping priority %d to tc %d\n",
4274                   outer_prio, bp->prio_to_cos[outer_prio]);
4275        }
4276
4277        /* Use this configuration to differentiate tc0 from other COSes.
4278           This can be used for ETS or PFC, and saves the effort of setting
4279           up a multi-class queue disc or negotiating DCBX with a switch:
4280        netdev_set_prio_tc_map(dev, 0, 0);
4281        DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4282        for (prio = 1; prio < 16; prio++) {
4283                netdev_set_prio_tc_map(dev, prio, 1);
4284                DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4285        } */
4286
4287        /* configure traffic class to transmission queue mapping */
4288        for (cos = 0; cos < bp->max_cos; cos++) {
4289                count = BNX2X_NUM_ETH_QUEUES(bp);
4290                offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4291                netdev_set_tc_queue(dev, cos, count, offset);
4292                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4293                   "mapping tc %d to offset %d count %d\n",
4294                   cos, offset, count);
4295        }
4296
4297        return 0;
4298}
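
    /* Illustration of the tc -> queue mapping above, assuming four non-CNIC
     * queues and max_cos = 3: netdev_set_tc_queue() is called with offsets
     * 0, 4 and 8, so each traffic class is steered to its own block of
     * transmission queues.
     */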
4299
4300int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4301                     void *type_data)
4302{
4303        struct tc_mqprio_qopt *mqprio = type_data;
4304
4305        if (type != TC_SETUP_QDISC_MQPRIO)
4306                return -EOPNOTSUPP;
4307
4308        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4309
4310        return bnx2x_setup_tc(dev, mqprio->num_tc);
4311}
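
    /* This is reached through the ndo_setup_tc hook; only the mqprio qdisc
     * (TC_SETUP_QDISC_MQPRIO) is offloaded, so a configuration along the
     * lines of "tc qdisc add dev <ifname> root mqprio num_tc 3 ... hw 1"
     * lands in bnx2x_setup_tc() above, while any other offload type is
     * rejected with -EOPNOTSUPP.
     */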
4312
4313/* called with rtnl_lock */
4314int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4315{
4316        struct sockaddr *addr = p;
4317        struct bnx2x *bp = netdev_priv(dev);
4318        int rc = 0;
4319
4320        if (!is_valid_ether_addr(addr->sa_data)) {
4321                BNX2X_ERR("Requested MAC address is not valid\n");
4322                return -EINVAL;
4323        }
4324
4325        if (IS_MF_STORAGE_ONLY(bp)) {
4326                BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4327                return -EINVAL;
4328        }
4329
4330        if (netif_running(dev))  {
4331                rc = bnx2x_set_eth_mac(bp, false);
4332                if (rc)
4333                        return rc;
4334        }
4335
4336        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4337
4338        if (netif_running(dev))
4339                rc = bnx2x_set_eth_mac(bp, true);
4340
4341        if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4342                SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4343
4344        return rc;
4345}
4346
4347static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4348{
4349        union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4350        struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4351        u8 cos;
4352
4353        /* Common */
4354
4355        if (IS_FCOE_IDX(fp_index)) {
4356                memset(sb, 0, sizeof(union host_hc_status_block));
4357                fp->status_blk_mapping = 0;
4358        } else {
4359                /* status blocks */
4360                if (!CHIP_IS_E1x(bp))
4361                        BNX2X_PCI_FREE(sb->e2_sb,
4362                                       bnx2x_fp(bp, fp_index,
4363                                                status_blk_mapping),
4364                                       sizeof(struct host_hc_status_block_e2));
4365                else
4366                        BNX2X_PCI_FREE(sb->e1x_sb,
4367                                       bnx2x_fp(bp, fp_index,
4368                                                status_blk_mapping),
4369                                       sizeof(struct host_hc_status_block_e1x));
4370        }
4371
4372        /* Rx */
4373        if (!skip_rx_queue(bp, fp_index)) {
4374                bnx2x_free_rx_bds(fp);
4375
4376                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4377                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4378                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4379                               bnx2x_fp(bp, fp_index, rx_desc_mapping),
4380                               sizeof(struct eth_rx_bd) * NUM_RX_BD);
4381
4382                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4383                               bnx2x_fp(bp, fp_index, rx_comp_mapping),
4384                               sizeof(struct eth_fast_path_rx_cqe) *
4385                               NUM_RCQ_BD);
4386
4387                /* SGE ring */
4388                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4389                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4390                               bnx2x_fp(bp, fp_index, rx_sge_mapping),
4391                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4392        }
4393
4394        /* Tx */
4395        if (!skip_tx_queue(bp, fp_index)) {
4396                /* fastpath tx rings: tx_buf tx_desc */
4397                for_each_cos_in_tx_queue(fp, cos) {
4398                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4399
4400                        DP(NETIF_MSG_IFDOWN,
4401                           "freeing tx memory of fp %d cos %d cid %d\n",
4402                           fp_index, cos, txdata->cid);
4403
4404                        BNX2X_FREE(txdata->tx_buf_ring);
4405                        BNX2X_PCI_FREE(txdata->tx_desc_ring,
4406                                txdata->tx_desc_mapping,
4407                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4408                }
4409        }
4410        /* end of fastpath */
4411}
4412
4413static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4414{
4415        int i;
4416        for_each_cnic_queue(bp, i)
4417                bnx2x_free_fp_mem_at(bp, i);
4418}
4419
4420void bnx2x_free_fp_mem(struct bnx2x *bp)
4421{
4422        int i;
4423        for_each_eth_queue(bp, i)
4424                bnx2x_free_fp_mem_at(bp, i);
4425}
4426
4427static void set_sb_shortcuts(struct bnx2x *bp, int index)
4428{
4429        union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4430        if (!CHIP_IS_E1x(bp)) {
4431                bnx2x_fp(bp, index, sb_index_values) =
4432                        (__le16 *)status_blk.e2_sb->sb.index_values;
4433                bnx2x_fp(bp, index, sb_running_index) =
4434                        (__le16 *)status_blk.e2_sb->sb.running_index;
4435        } else {
4436                bnx2x_fp(bp, index, sb_index_values) =
4437                        (__le16 *)status_blk.e1x_sb->sb.index_values;
4438                bnx2x_fp(bp, index, sb_running_index) =
4439                        (__le16 *)status_blk.e1x_sb->sb.running_index;
4440        }
4441}
4442
4443/* Returns the number of actually allocated BDs */
4444static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4445                              int rx_ring_size)
4446{
4447        struct bnx2x *bp = fp->bp;
4448        u16 ring_prod, cqe_ring_prod;
4449        int i, failure_cnt = 0;
4450
4451        fp->rx_comp_cons = 0;
4452        cqe_ring_prod = ring_prod = 0;
4453
4454        /* This routine is called only during init so
4455         * fp->eth_q_stats.rx_skb_alloc_failed = 0
4456         */
4457        for (i = 0; i < rx_ring_size; i++) {
4458                if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4459                        failure_cnt++;
4460                        continue;
4461                }
4462                ring_prod = NEXT_RX_IDX(ring_prod);
4463                cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4464                WARN_ON(ring_prod <= (i - failure_cnt));
4465        }
4466
4467        if (failure_cnt)
4468                BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4469                          i - failure_cnt, fp->index);
4470
4471        fp->rx_bd_prod = ring_prod;
4472        /* Limit the CQE producer by the CQE ring size */
4473        fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4474                               cqe_ring_prod);
4475
4476        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4477
4478        return i - failure_cnt;
4479}
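
    /* Note that bnx2x_alloc_rx_bds() deliberately keeps going past individual
     * buffer allocation failures and only reports how many BDs it managed to
     * fill; the caller decides whether the shortfall still leaves the ring
     * above the minimum size or whether the queue has to be released.
     */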
4480
4481static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4482{
4483        int i;
4484
4485        for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4486                struct eth_rx_cqe_next_page *nextpg;
4487
4488                nextpg = (struct eth_rx_cqe_next_page *)
4489                        &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4490                nextpg->addr_hi =
4491                        cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4492                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4493                nextpg->addr_lo =
4494                        cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4495                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4496        }
4497}
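
    /* The last CQE of each RCQ page is reused as a "next page" pointer: the
     * loop above writes the DMA address of the following page (wrapping back
     * to the first one) into that slot, so the per-page arrays behave as one
     * contiguous completion ring.
     */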
4498
4499static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4500{
4501        union host_hc_status_block *sb;
4502        struct bnx2x_fastpath *fp = &bp->fp[index];
4503        int ring_size = 0;
4504        u8 cos;
4505        int rx_ring_size = 0;
4506
4507        if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4508                rx_ring_size = MIN_RX_SIZE_NONTPA;
4509                bp->rx_ring_size = rx_ring_size;
4510        } else if (!bp->rx_ring_size) {
4511                rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4512
4513                if (CHIP_IS_E3(bp)) {
4514                        u32 cfg = SHMEM_RD(bp,
4515                                           dev_info.port_hw_config[BP_PORT(bp)].
4516                                           default_cfg);
4517
4518                        /* Decrease ring size for 1G functions */
4519                        if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4520                            PORT_HW_CFG_NET_SERDES_IF_SGMII)
4521                                rx_ring_size /= 10;
4522                }
4523
4524                /* allocate at least the number of buffers required by FW */
4525                rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4526                                     MIN_RX_SIZE_TPA, rx_ring_size);
4527
4528                bp->rx_ring_size = rx_ring_size;
4529        } else /* if rx_ring_size specified - use it */
4530                rx_ring_size = bp->rx_ring_size;
4531
4532        DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4533
4534        /* Common */
4535        sb = &bnx2x_fp(bp, index, status_blk);
4536
4537        if (!IS_FCOE_IDX(index)) {
4538                /* status blocks */
4539                if (!CHIP_IS_E1x(bp)) {
4540                        sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4541                                                    sizeof(struct host_hc_status_block_e2));
4542                        if (!sb->e2_sb)
4543                                goto alloc_mem_err;
4544                } else {
4545                        sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4546                                                     sizeof(struct host_hc_status_block_e1x));
4547                        if (!sb->e1x_sb)
4548                                goto alloc_mem_err;
4549                }
4550        }
4551
4552        /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4553         * set shortcuts for it.
4554         */
4555        if (!IS_FCOE_IDX(index))
4556                set_sb_shortcuts(bp, index);
4557
4558        /* Tx */
4559        if (!skip_tx_queue(bp, index)) {
4560                /* fastpath tx rings: tx_buf tx_desc */
4561                for_each_cos_in_tx_queue(fp, cos) {
4562                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4563
4564                        DP(NETIF_MSG_IFUP,
4565                           "allocating tx memory of fp %d cos %d\n",
4566                           index, cos);
4567
4568                        txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4569                                                      sizeof(struct sw_tx_bd),
4570                                                      GFP_KERNEL);
4571                        if (!txdata->tx_buf_ring)
4572                                goto alloc_mem_err;
4573                        txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4574                                                               sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4575                        if (!txdata->tx_desc_ring)
4576                                goto alloc_mem_err;
4577                }
4578        }
4579
4580        /* Rx */
4581        if (!skip_rx_queue(bp, index)) {
4582                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4583                bnx2x_fp(bp, index, rx_buf_ring) =
4584                        kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4585                if (!bnx2x_fp(bp, index, rx_buf_ring))
4586                        goto alloc_mem_err;
4587                bnx2x_fp(bp, index, rx_desc_ring) =
4588                        BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4589                                        sizeof(struct eth_rx_bd) * NUM_RX_BD);
4590                if (!bnx2x_fp(bp, index, rx_desc_ring))
4591                        goto alloc_mem_err;
4592
4593                /* Seed all CQEs by 1s */
4594                bnx2x_fp(bp, index, rx_comp_ring) =
4595                        BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4596                                         sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4597                if (!bnx2x_fp(bp, index, rx_comp_ring))
4598                        goto alloc_mem_err;
4599
4600                /* SGE ring */
4601                bnx2x_fp(bp, index, rx_page_ring) =
4602                        kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4603                                GFP_KERNEL);
4604                if (!bnx2x_fp(bp, index, rx_page_ring))
4605                        goto alloc_mem_err;
4606                bnx2x_fp(bp, index, rx_sge_ring) =
4607                        BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4608                                        BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4609                if (!bnx2x_fp(bp, index, rx_sge_ring))
4610                        goto alloc_mem_err;
4611                /* RX BD ring */
4612                bnx2x_set_next_page_rx_bd(fp);
4613
4614                /* CQ ring */
4615                bnx2x_set_next_page_rx_cq(fp);
4616
4617                /* BDs */
4618                ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4619                if (ring_size < rx_ring_size)
4620                        goto alloc_mem_err;
4621        }
4622
4623        return 0;
4624
4625/* handles low memory cases */
4626alloc_mem_err:
4627        BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4628                                                index, ring_size);
4629        /* FW will drop all packets if the queue is not big enough.
4630         * In these cases we disable the queue.
4631         * The minimum size differs for OOO, TPA and non-TPA queues.
4632         */
4633        if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4634                                MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4635                        /* release memory allocated for this queue */
4636                        bnx2x_free_fp_mem_at(bp, index);
4637                        return -ENOMEM;
4638        }
4639        return 0;
4640}
4641
4642static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4643{
4644        if (!NO_FCOE(bp))
4645                /* FCoE */
4646                if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4647                        /* we will fail the load process instead of
4648                         * marking NO_FCOE_FLAG
4649                         */
4650                        return -ENOMEM;
4651
4652        return 0;
4653}
4654
4655static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4656{
4657        int i;
4658
4659        /* 1. Allocate FP for leading - fatal if error
4660         * 2. Allocate RSS - fix number of queues if error
4661         */
4662
4663        /* leading */
4664        if (bnx2x_alloc_fp_mem_at(bp, 0))
4665                return -ENOMEM;
4666
4667        /* RSS */
4668        for_each_nondefault_eth_queue(bp, i)
4669                if (bnx2x_alloc_fp_mem_at(bp, i))
4670                        break;
4671
4672        /* handle memory failures */
4673        if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4674                int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4675
4676                WARN_ON(delta < 0);
4677                bnx2x_shrink_eth_fp(bp, delta);
4678                if (CNIC_SUPPORT(bp))
4679                        /* move non-eth FPs next to the last eth FP;
4680                         * this must be done in the order
4681                         * FCOE_IDX < FWD_IDX < OOO_IDX
4682                         */
4683
4684                        /* move the FCoE fp even if NO_FCOE_FLAG is on */
4685                        bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4686                bp->num_ethernet_queues -= delta;
4687                bp->num_queues = bp->num_ethernet_queues +
4688                                 bp->num_cnic_queues;
4689                BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4690                          bp->num_queues + delta, bp->num_queues);
4691        }
4692
4693        return 0;
4694}
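
    /* Example of the shrink path above: if eight ETH queues were requested
     * but allocation first fails at index 5, delta is 3; the missing RSS
     * queues are dropped, the FCoE fastpath (when CNIC is supported) is moved
     * down by delta so it stays adjacent to the last ETH queue, and
     * bp->num_queues is reduced accordingly.
     */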
4695
4696void bnx2x_free_mem_bp(struct bnx2x *bp)
4697{
4698        int i;
4699
4700        for (i = 0; i < bp->fp_array_size; i++)
4701                kfree(bp->fp[i].tpa_info);
4702        kfree(bp->fp);
4703        kfree(bp->sp_objs);
4704        kfree(bp->fp_stats);
4705        kfree(bp->bnx2x_txq);
4706        kfree(bp->msix_table);
4707        kfree(bp->ilt);
4708}
4709
4710int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4711{
4712        struct bnx2x_fastpath *fp;
4713        struct msix_entry *tbl;
4714        struct bnx2x_ilt *ilt;
4715        int msix_table_size = 0;
4716        int fp_array_size, txq_array_size;
4717        int i;
4718
4719        /*
4720         * The biggest MSI-X table we might need is the maximum number of fast
4721         * path IGU SBs plus the default SB (for PF only).
4722         */
4723        msix_table_size = bp->igu_sb_cnt;
4724        if (IS_PF(bp))
4725                msix_table_size++;
4726        BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4727
4728        /* fp array: RSS plus CNIC related L2 queues */
4729        fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4730        bp->fp_array_size = fp_array_size;
4731        BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4732
4733        fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4734        if (!fp)
4735                goto alloc_err;
4736        for (i = 0; i < bp->fp_array_size; i++) {
4737                fp[i].tpa_info =
4738                        kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4739                                sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4740                if (!(fp[i].tpa_info))
4741                        goto alloc_err;
4742        }
4743
4744        bp->fp = fp;
4745
4746        /* allocate sp objs */
4747        bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4748                              GFP_KERNEL);
4749        if (!bp->sp_objs)
4750                goto alloc_err;
4751
4752        /* allocate fp_stats */
4753        bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4754                               GFP_KERNEL);
4755        if (!bp->fp_stats)
4756                goto alloc_err;
4757
4758        /* Allocate memory for the transmission queues array */
4759        txq_array_size =
4760                BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4761        BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4762
4763        bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4764                                GFP_KERNEL);
4765        if (!bp->bnx2x_txq)
4766                goto alloc_err;
4767
4768        /* msix table */
4769        tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4770        if (!tbl)
4771                goto alloc_err;
4772        bp->msix_table = tbl;
4773
4774        /* ilt */
4775        ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4776        if (!ilt)
4777                goto alloc_err;
4778        bp->ilt = ilt;
4779
4780        return 0;
4781alloc_err:
4782        bnx2x_free_mem_bp(bp);
4783        return -ENOMEM;
4784}
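
    /* Sizing summary for the allocations above: the MSI-X table covers every
     * fastpath IGU SB plus one default SB on the PF, the fp/sp_objs/fp_stats
     * arrays all use fp_array_size entries (RSS queues plus one CNIC L2 queue
     * when CNIC is supported), and the txq array reserves BNX2X_MULTI_TX_COS
     * txdata slots per RSS queue plus one for CNIC.
     */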
4785
4786int bnx2x_reload_if_running(struct net_device *dev)
4787{
4788        struct bnx2x *bp = netdev_priv(dev);
4789
4790        if (unlikely(!netif_running(dev)))
4791                return 0;
4792
4793        bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4794        return bnx2x_nic_load(bp, LOAD_NORMAL);
4795}
4796
4797int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4798{
4799        u32 sel_phy_idx = 0;
4800        if (bp->link_params.num_phys <= 1)
4801                return INT_PHY;
4802
4803        if (bp->link_vars.link_up) {
4804                sel_phy_idx = EXT_PHY1;
4805                /* In case link is SERDES, check if the EXT_PHY2 is the one */
4806                if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4807                    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4808                        sel_phy_idx = EXT_PHY2;
4809        } else {
4810
4811                switch (bnx2x_phy_selection(&bp->link_params)) {
4812                case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4813                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4814                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4815                       sel_phy_idx = EXT_PHY1;
4816                       break;
4817                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4818                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4819                       sel_phy_idx = EXT_PHY2;
4820                       break;
4821                }
4822        }
4823
4824        return sel_phy_idx;
4825}
4826int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4827{
4828        u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4829        /*
4830         * The selected active PHY index is always the one after swapping (when
4831         * PHY swapping is enabled), so in that case we need to reverse the
4832         * configuration.
4833         */
4834
4835        if (bp->link_params.multi_phy_config &
4836            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4837                if (sel_phy_idx == EXT_PHY1)
4838                        sel_phy_idx = EXT_PHY2;
4839                else if (sel_phy_idx == EXT_PHY2)
4840                        sel_phy_idx = EXT_PHY1;
4841        }
4842        return LINK_CONFIG_IDX(sel_phy_idx);
4843}
4844
4845#ifdef NETDEV_FCOE_WWNN
4846int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4847{
4848        struct bnx2x *bp = netdev_priv(dev);
4849        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4850
4851        switch (type) {
4852        case NETDEV_FCOE_WWNN:
4853                *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4854                                cp->fcoe_wwn_node_name_lo);
4855                break;
4856        case NETDEV_FCOE_WWPN:
4857                *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4858                                cp->fcoe_wwn_port_name_lo);
4859                break;
4860        default:
4861                BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4862                return -EINVAL;
4863        }
4864
4865        return 0;
4866}
4867#endif
4868
4869/* called with rtnl_lock */
4870int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4871{
4872        struct bnx2x *bp = netdev_priv(dev);
4873
4874        if (pci_num_vf(bp->pdev)) {
4875                DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4876                return -EPERM;
4877        }
4878
4879        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4880                BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4881                return -EAGAIN;
4882        }
4883
4884        /* This does not race with packet allocation
4885         * because the actual alloc size is
4886         * only updated as part of load
4887         */
4888        dev->mtu = new_mtu;
4889
4890        if (!bnx2x_mtu_allows_gro(new_mtu))
4891                dev->features &= ~NETIF_F_GRO_HW;
4892
4893        if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4894                SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4895
4896        return bnx2x_reload_if_running(dev);
4897}
4898
4899netdev_features_t bnx2x_fix_features(struct net_device *dev,
4900                                     netdev_features_t features)
4901{
4902        struct bnx2x *bp = netdev_priv(dev);
4903
4904        if (pci_num_vf(bp->pdev)) {
4905                netdev_features_t changed = dev->features ^ features;
4906
4907                /* Revert the requested changes in features if they
4908                 * would require internal reload of PF in bnx2x_set_features().
4909                 */
4910                if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4911                        features &= ~NETIF_F_RXCSUM;
4912                        features |= dev->features & NETIF_F_RXCSUM;
4913                }
4914
4915                if (changed & NETIF_F_LOOPBACK) {
4916                        features &= ~NETIF_F_LOOPBACK;
4917                        features |= dev->features & NETIF_F_LOOPBACK;
4918                }
4919        }
4920
4921        /* TPA requires Rx CSUM offloading */
4922        if (!(features & NETIF_F_RXCSUM))
4923                features &= ~NETIF_F_LRO;
4924
4925        if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4926                features &= ~NETIF_F_GRO_HW;
4927        if (features & NETIF_F_GRO_HW)
4928                features &= ~NETIF_F_LRO;
4929
4930        return features;
4931}
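
    /* Net effect of the fixups above: RX checksum offload cannot be disabled
     * without also dropping LRO, hardware GRO is cleared whenever software
     * GRO is off or the MTU is too large for it, and LRO and hardware GRO
     * stay mutually exclusive.
     */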
4932
4933int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4934{
4935        struct bnx2x *bp = netdev_priv(dev);
4936        netdev_features_t changes = features ^ dev->features;
4937        bool bnx2x_reload = false;
4938        int rc;
4939
4940        /* VFs or non SRIOV PFs should be able to change loopback feature */
4941        if (!pci_num_vf(bp->pdev)) {
4942                if (features & NETIF_F_LOOPBACK) {
4943                        if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4944                                bp->link_params.loopback_mode = LOOPBACK_BMAC;
4945                                bnx2x_reload = true;
4946                        }
4947                } else {
4948                        if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4949                                bp->link_params.loopback_mode = LOOPBACK_NONE;
4950                                bnx2x_reload = true;
4951                        }
4952                }
4953        }
4954
4955        /* Don't care about GRO changes */
4956        changes &= ~NETIF_F_GRO;
4957
4958        if (changes)
4959                bnx2x_reload = true;
4960
4961        if (bnx2x_reload) {
4962                if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4963                        dev->features = features;
4964                        rc = bnx2x_reload_if_running(dev);
4965                        return rc ? rc : 1;
4966                }
4967                /* else: bnx2x_nic_load() will be called at end of recovery */
4968        }
4969
4970        return 0;
4971}
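
    /* When a reload is needed and recovery is complete, dev->features is
     * committed here before the reload and a positive value is returned on
     * success, which signals to the core that the driver has already applied
     * the new feature set.
     */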
4972
4973void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
4974{
4975        struct bnx2x *bp = netdev_priv(dev);
4976
4977        /* We want the dump information logged,
4978         * but calling bnx2x_panic() would kill all chances of recovery.
4979         */
4980        if (!bp->panic)
4981#ifndef BNX2X_STOP_ON_ERROR
4982                bnx2x_panic_dump(bp, false);
4983#else
4984                bnx2x_panic();
4985#endif
4986
4987        /* This allows the netif to be shutdown gracefully before resetting */
4988        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4989}
4990
4991static int __maybe_unused bnx2x_suspend(struct device *dev_d)
4992{
4993        struct pci_dev *pdev = to_pci_dev(dev_d);
4994        struct net_device *dev = pci_get_drvdata(pdev);
4995        struct bnx2x *bp;
4996
4997        if (!dev) {
4998                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4999                return -ENODEV;
5000        }
5001        bp = netdev_priv(dev);
5002
5003        rtnl_lock();
5004
5005        if (!netif_running(dev)) {
5006                rtnl_unlock();
5007                return 0;
5008        }
5009
5010        netif_device_detach(dev);
5011
5012        bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5013
5014        rtnl_unlock();
5015
5016        return 0;
5017}
5018
5019static int __maybe_unused bnx2x_resume(struct device *dev_d)
5020{
5021        struct pci_dev *pdev = to_pci_dev(dev_d);
5022        struct net_device *dev = pci_get_drvdata(pdev);
5023        struct bnx2x *bp;
5024        int rc;
5025
5026        if (!dev) {
5027                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5028                return -ENODEV;
5029        }
5030        bp = netdev_priv(dev);
5031
5032        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5033                BNX2X_ERR("Handling parity error recovery. Try again later\n");
5034                return -EAGAIN;
5035        }
5036
5037        rtnl_lock();
5038
5039        if (!netif_running(dev)) {
5040                rtnl_unlock();
5041                return 0;
5042        }
5043
5044        netif_device_attach(dev);
5045
5046        rc = bnx2x_nic_load(bp, LOAD_OPEN);
5047
5048        rtnl_unlock();
5049
5050        return rc;
5051}
5052
5053SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);
5054
5055void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5056                              u32 cid)
5057{
5058        if (!cxt) {
5059                BNX2X_ERR("bad context pointer %p\n", cxt);
5060                return;
5061        }
5062
5063        /* ustorm cxt validation */
5064        cxt->ustorm_ag_context.cdu_usage =
5065                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5066                        CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5067        /* xcontext validation */
5068        cxt->xstorm_ag_context.cdu_reserved =
5069                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5070                        CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5071}
5072
5073static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5074                                    u8 fw_sb_id, u8 sb_index,
5075                                    u8 ticks)
5076{
5077        u32 addr = BAR_CSTRORM_INTMEM +
5078                   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5079        REG_WR8(bp, addr, ticks);
5080        DP(NETIF_MSG_IFUP,
5081           "port %x fw_sb_id %d sb_index %d ticks %d\n",
5082           port, fw_sb_id, sb_index, ticks);
5083}
5084
5085static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5086                                    u16 fw_sb_id, u8 sb_index,
5087                                    u8 disable)
5088{
5089        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5090        u32 addr = BAR_CSTRORM_INTMEM +
5091                   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5092        u8 flags = REG_RD8(bp, addr);
5093        /* clear and set */
5094        flags &= ~HC_INDEX_DATA_HC_ENABLED;
5095        flags |= enable_flag;
5096        REG_WR8(bp, addr, flags);
5097        DP(NETIF_MSG_IFUP,
5098           "port %x fw_sb_id %d sb_index %d disable %d\n",
5099           port, fw_sb_id, sb_index, disable);
5100}
5101
5102void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5103                                    u8 sb_index, u8 disable, u16 usec)
5104{
5105        int port = BP_PORT(bp);
5106        u8 ticks = usec / BNX2X_BTR;
5107
5108        storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5109
5110        disable = disable ? 1 : (usec ? 0 : 1);
5111        storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5112}
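
    /* In short: the requested interval in microseconds is converted into
     * BNX2X_BTR-sized ticks for the status block timeout, and passing
     * usec == 0 (or an explicit disable) clears HC_INDEX_DATA_HC_ENABLED for
     * that status block index, turning host coalescing off for it.
     */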
5113
5114void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5115                            u32 verbose)
5116{
5117        smp_mb__before_atomic();
5118        set_bit(flag, &bp->sp_rtnl_state);
5119        smp_mb__after_atomic();
5120        DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5121           flag);
5122        schedule_delayed_work(&bp->sp_rtnl_task, 0);
5123}
5124