linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
   1/* bnx2x_cmn.c: QLogic Everest network driver.
   2 *
   3 * Copyright (c) 2007-2013 Broadcom Corporation
   4 * Copyright (c) 2014 QLogic Corporation
   5 * All rights reserved
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation.
  10 *
  11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  12 * Written by: Eliezer Tamir
  13 * Based on code from Michael Chan's bnx2 driver
  14 * UDP CSUM errata workaround by Arik Gendelman
  15 * Slowpath and fastpath rework by Vladislav Zolotarov
  16 * Statistics and Link management by Yitchak Gertner
  17 *
  18 */
  19
  20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21
  22#include <linux/etherdevice.h>
  23#include <linux/if_vlan.h>
  24#include <linux/interrupt.h>
  25#include <linux/ip.h>
  26#include <linux/crash_dump.h>
  27#include <net/tcp.h>
  28#include <net/ipv6.h>
  29#include <net/ip6_checksum.h>
  30#include <linux/prefetch.h>
  31#include "bnx2x_cmn.h"
  32#include "bnx2x_init.h"
  33#include "bnx2x_sp.h"
  34
  35static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
  36static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
  37static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
  38static int bnx2x_poll(struct napi_struct *napi, int budget);
  39
  40static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
  41{
  42        int i;
  43
  44        /* Add NAPI objects */
  45        for_each_rx_queue_cnic(bp, i) {
  46                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  47                               bnx2x_poll, NAPI_POLL_WEIGHT);
  48        }
  49}
  50
  51static void bnx2x_add_all_napi(struct bnx2x *bp)
  52{
  53        int i;
  54
  55        /* Add NAPI objects */
  56        for_each_eth_queue(bp, i) {
  57                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  58                               bnx2x_poll, NAPI_POLL_WEIGHT);
  59        }
  60}
  61
  62static int bnx2x_calc_num_queues(struct bnx2x *bp)
  63{
  64        int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
  65
  66        /* Reduce memory usage in kdump environment by using only one queue */
  67        if (is_kdump_kernel())
  68                nq = 1;
  69
  70        nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
  71        return nq;
  72}
  73
  74/**
  75 * bnx2x_move_fp - move content of the fastpath structure.
  76 *
  77 * @bp:         driver handle
  78 * @from:       source FP index
  79 * @to:         destination FP index
  80 *
  81 * Makes sure the contents of bp->fp[to].napi are kept
  82 * intact. This is done by first copying the napi struct from
  83 * the target to the source, and then memcpying the entire
  84 * source onto the target. Txdata pointers and related content
  85 * are updated accordingly.
  86 */
  87static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
  88{
  89        struct bnx2x_fastpath *from_fp = &bp->fp[from];
  90        struct bnx2x_fastpath *to_fp = &bp->fp[to];
  91        struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
  92        struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
  93        struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
  94        struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
  95        int old_max_eth_txqs, new_max_eth_txqs;
  96        int old_txdata_index = 0, new_txdata_index = 0;
  97        struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
  98
  99        /* Copy the NAPI object as it has already been initialized */
 100        from_fp->napi = to_fp->napi;
 101
 102        /* Move bnx2x_fastpath contents */
 103        memcpy(to_fp, from_fp, sizeof(*to_fp));
 104        to_fp->index = to;
 105
 106        /* Retain the tpa_info of the original `to' version as we don't want
 107         * 2 FPs to contain the same tpa_info pointer.
 108         */
 109        to_fp->tpa_info = old_tpa_info;
 110
 111        /* move sp_objs contents as well, as their indices match fp ones */
 112        memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
 113
 114        /* move fp_stats contents as well, as their indices match fp ones */
 115        memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
 116
 117        /* Update txdata pointers in fp and move txdata content accordingly:
 118         * Each fp consumes 'max_cos' txdata structures, so the index should be
 119         * decremented by max_cos x delta.
 120         */
 121
 122        old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
 123        new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
 124                                (bp)->max_cos;
 125        if (from == FCOE_IDX(bp)) {
 126                old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
 127                new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
 128        }
 129
 130        memcpy(&bp->bnx2x_txq[new_txdata_index],
 131               &bp->bnx2x_txq[old_txdata_index],
 132               sizeof(struct bnx2x_fp_txdata));
 133        to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
 134}
 135
 136/**
 137 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 138 *
 139 * @bp:        driver handle
 140 * @buf:       character buffer to fill with the fw name
 141 * @buf_len:   length of the above buffer
 142 *
 143 */
 144void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
 145{
 146        if (IS_PF(bp)) {
 147                u8 phy_fw_ver[PHY_FW_VER_LEN];
 148
 149                phy_fw_ver[0] = '\0';
 150                bnx2x_get_ext_phy_fw_version(&bp->link_params,
 151                                             phy_fw_ver, PHY_FW_VER_LEN);
 152                strlcpy(buf, bp->fw_ver, buf_len);
 153                snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
 154                         "bc %d.%d.%d%s%s",
 155                         (bp->common.bc_ver & 0xff0000) >> 16,
 156                         (bp->common.bc_ver & 0xff00) >> 8,
 157                         (bp->common.bc_ver & 0xff),
 158                         ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
 159        } else {
 160                bnx2x_vf_fill_fw_str(bp, buf, buf_len);
 161        }
 162}
 163
 164/**
 165 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 166 *
 167 * @bp: driver handle
 168 * @delta:      number of eth queues which were not allocated
 169 */
 170static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
 171{
 172        int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
 173
 174        /* Queue pointers cannot be re-set on a per-fp basis, as moving a
 175         * pointer backward along the array could cause memory to be overwritten
 176         */
 177        for (cos = 1; cos < bp->max_cos; cos++) {
 178                for (i = 0; i < old_eth_num - delta; i++) {
 179                        struct bnx2x_fastpath *fp = &bp->fp[i];
 180                        int new_idx = cos * (old_eth_num - delta) + i;
 181
 182                        memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
 183                               sizeof(struct bnx2x_fp_txdata));
 184                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
 185                }
 186        }
 187}
 188
 189int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 190
 191/* free skb in the packet ring at pos idx
 192 * return idx of last bd freed
 193 */
 194static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 195                             u16 idx, unsigned int *pkts_compl,
 196                             unsigned int *bytes_compl)
 197{
 198        struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
 199        struct eth_tx_start_bd *tx_start_bd;
 200        struct eth_tx_bd *tx_data_bd;
 201        struct sk_buff *skb = tx_buf->skb;
 202        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 203        int nbd;
 204        u16 split_bd_len = 0;
 205
 206        /* prefetch skb end pointer to speed up dev_kfree_skb() */
 207        prefetch(&skb->end);
 208
 209        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
 210           txdata->txq_index, idx, tx_buf, skb);
 211
 212        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
 213
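            /* nbd is the total number of BDs used by this packet: the start
             * BD, the parsing BD(s), an optional TSO split BD and the frag
             * BDs.
             */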
 214        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 215#ifdef BNX2X_STOP_ON_ERROR
 216        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
 217                BNX2X_ERR("BAD nbd!\n");
 218                bnx2x_panic();
 219        }
 220#endif
 221        new_cons = nbd + tx_buf->first_bd;
 222
 223        /* Get the next bd */
 224        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 225
 226        /* Skip a parse bd... */
 227        --nbd;
 228        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 229
 230        if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
 231                /* Skip second parse bd... */
 232                --nbd;
 233                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 234        }
 235
 236        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 237        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 238                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 239                split_bd_len = BD_UNMAP_LEN(tx_data_bd);
 240                --nbd;
 241                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 242        }
 243
 244        /* unmap first bd */
 245        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 246                         BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
 247                         DMA_TO_DEVICE);
 248
 249        /* now free frags */
 250        while (nbd > 0) {
 251
 252                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 253                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
 254                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 255                if (--nbd)
 256                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 257        }
 258
 259        /* release skb */
 260        WARN_ON(!skb);
 261        if (likely(skb)) {
 262                (*pkts_compl)++;
 263                (*bytes_compl) += skb->len;
 264                dev_kfree_skb_any(skb);
 265        }
 266
 267        tx_buf->first_bd = 0;
 268        tx_buf->skb = NULL;
 269
 270        return new_cons;
 271}
 272
 273int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 274{
 275        struct netdev_queue *txq;
 276        u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
 277        unsigned int pkts_compl = 0, bytes_compl = 0;
 278
 279#ifdef BNX2X_STOP_ON_ERROR
 280        if (unlikely(bp->panic))
 281                return -1;
 282#endif
 283
 284        txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 285        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
 286        sw_cons = txdata->tx_pkt_cons;
 287
 288        /* Ensure subsequent loads occur after hw_cons */
 289        smp_rmb();
 290
 291        while (sw_cons != hw_cons) {
 292                u16 pkt_cons;
 293
 294                pkt_cons = TX_BD(sw_cons);
 295
 296                DP(NETIF_MSG_TX_DONE,
 297                   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
 298                   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 299
 300                bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
 301                                            &pkts_compl, &bytes_compl);
 302
 303                sw_cons++;
 304        }
 305
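            /* Report the completed packets and bytes to BQL (byte queue limits) */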
 306        netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
 307
 308        txdata->tx_pkt_cons = sw_cons;
 309        txdata->tx_bd_cons = bd_cons;
 310
 311        /* Need to make the tx_bd_cons update visible to start_xmit()
 312         * before checking for netif_tx_queue_stopped().  Without the
 313         * memory barrier, there is a small possibility that
 314         * start_xmit() will miss it and cause the queue to be stopped
 315         * forever.
 316         * On the other hand we need an rmb() here to ensure the proper
 317         * ordering of bit testing in the following
 318         * netif_tx_queue_stopped(txq) call.
 319         */
 320        smp_mb();
 321
 322        if (unlikely(netif_tx_queue_stopped(txq))) {
 323                /* Taking tx_lock() is needed to prevent re-enabling the queue
 324                 * while it's empty. This could happen if rx_action() gets
 325                 * suspended in bnx2x_tx_int() after the condition before
 326                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
 327                 *
 328                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
 329                 * sends some packets consuming the whole queue again->
 330                 * stops the queue
 331                 */
 332
 333                __netif_tx_lock(txq, smp_processor_id());
 334
 335                if ((netif_tx_queue_stopped(txq)) &&
 336                    (bp->state == BNX2X_STATE_OPEN) &&
 337                    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
 338                        netif_tx_wake_queue(txq);
 339
 340                __netif_tx_unlock(txq);
 341        }
 342        return 0;
 343}
 344
 345static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
 346                                             u16 idx)
 347{
 348        u16 last_max = fp->last_max_sge;
 349
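            /* SUB_S16() compares the indices as signed 16-bit values so the
             * test stays correct when the SGE index wraps around.
             */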
 350        if (SUB_S16(idx, last_max) > 0)
 351                fp->last_max_sge = idx;
 352}
 353
 354static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 355                                         u16 sge_len,
 356                                         struct eth_end_agg_rx_cqe *cqe)
 357{
 358        struct bnx2x *bp = fp->bp;
 359        u16 last_max, last_elem, first_elem;
 360        u16 delta = 0;
 361        u16 i;
 362
 363        if (!sge_len)
 364                return;
 365
 366        /* First mark all used pages */
 367        for (i = 0; i < sge_len; i++)
 368                BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 369                        RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
 370
 371        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 372           sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 373
 374        /* Here we assume that the last SGE index is the biggest */
 375        prefetch((void *)(fp->sge_mask));
 376        bnx2x_update_last_max_sge(fp,
 377                le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 378
 379        last_max = RX_SGE(fp->last_max_sge);
 380        last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
 381        first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
 382
 383        /* If ring is not full */
 384        if (last_elem + 1 != first_elem)
 385                last_elem++;
 386
 387        /* Now update the prod */
 388        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
 389                if (likely(fp->sge_mask[i]))
 390                        break;
 391
 392                fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
 393                delta += BIT_VEC64_ELEM_SZ;
 394        }
 395
 396        if (delta > 0) {
 397                fp->rx_sge_prod += delta;
 398                /* clear page-end entries */
 399                bnx2x_clear_sge_mask_next_elems(fp);
 400        }
 401
 402        DP(NETIF_MSG_RX_STATUS,
 403           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
 404           fp->last_max_sge, fp->rx_sge_prod);
 405}
 406
 407/* Get Toeplitz hash value in the skb using the value from the
 408 * CQE (calculated by HW).
 409 */
 410static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
 411                            const struct eth_fast_path_rx_cqe *cqe,
 412                            enum pkt_hash_types *rxhash_type)
 413{
 414        /* Get Toeplitz hash from CQE */
 415        if ((bp->dev->features & NETIF_F_RXHASH) &&
 416            (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
 417                enum eth_rss_hash_type htype;
 418
 419                htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
 420                *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
 421                                (htype == TCP_IPV6_HASH_TYPE)) ?
 422                               PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
 423
 424                return le32_to_cpu(cqe->rss_hash_result);
 425        }
 426        *rxhash_type = PKT_HASH_TYPE_NONE;
 427        return 0;
 428}
 429
 430static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 431                            u16 cons, u16 prod,
 432                            struct eth_fast_path_rx_cqe *cqe)
 433{
 434        struct bnx2x *bp = fp->bp;
 435        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 436        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 437        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 438        dma_addr_t mapping;
 439        struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 440        struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 441
 442        /* print error if current state != stop */
 443        if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 444                BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 445
 446        /* Try to map an empty data buffer from the aggregation info  */
 447        mapping = dma_map_single(&bp->pdev->dev,
 448                                 first_buf->data + NET_SKB_PAD,
 449                                 fp->rx_buf_size, DMA_FROM_DEVICE);
 450        /*
 451         *  ...if it fails - move the skb from the consumer to the producer
 452         *  and set the current aggregation state as ERROR to drop it
 453         *  when TPA_STOP arrives.
 454         */
 455
 456        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 457                /* Move the BD from the consumer to the producer */
 458                bnx2x_reuse_rx_data(fp, cons, prod);
 459                tpa_info->tpa_state = BNX2X_TPA_ERROR;
 460                return;
 461        }
 462
 463        /* move empty data from pool to prod */
 464        prod_rx_buf->data = first_buf->data;
 465        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 466        /* point prod_bd to new data */
 467        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 468        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 469
 470        /* move partial skb from cons to pool (don't unmap yet) */
 471        *first_buf = *cons_rx_buf;
 472
 473        /* mark bin state as START */
 474        tpa_info->parsing_flags =
 475                le16_to_cpu(cqe->pars_flags.flags);
 476        tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
 477        tpa_info->tpa_state = BNX2X_TPA_START;
 478        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 479        tpa_info->placement_offset = cqe->placement_offset;
 480        tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
 481        if (fp->mode == TPA_MODE_GRO) {
 482                u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
 483                tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
 484                tpa_info->gro_size = gro_size;
 485        }
 486
 487#ifdef BNX2X_STOP_ON_ERROR
 488        fp->tpa_queue_used |= (1 << queue);
 489        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
 490           fp->tpa_queue_used);
 491#endif
 492}
 493
 494/* Timestamp option length allowed for TPA aggregation:
 495 *
 496 *              nop nop kind length echo val
 497 */
 498#define TPA_TSTAMP_OPT_LEN      12
 499/**
 500 * bnx2x_set_gro_params - compute GRO values
 501 *
 502 * @skb:                packet skb
 503 * @parsing_flags:      parsing flags from the START CQE
 504 * @len_on_bd:          total length of the first packet for the
 505 *                      aggregation.
 506 * @pkt_len:            length of all segments
 507 * @num_of_coalesced_segs: count of segments
 508 *
 509 * Approximates the MSS for this aggregation using its first
 510 * packet.
 511 * Computes the number of aggregated segments and the gso_type.
 512 */
 513static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 514                                 u16 len_on_bd, unsigned int pkt_len,
 515                                 u16 num_of_coalesced_segs)
 516{
 517        /* TPA aggregation won't have either IP options or TCP options
 518         * other than timestamp or IPv6 extension headers.
 519         */
 520        u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
 521
 522        if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
 523            PRS_FLAG_OVERETH_IPV6) {
 524                hdrs_len += sizeof(struct ipv6hdr);
 525                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 526        } else {
 527                hdrs_len += sizeof(struct iphdr);
 528                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 529        }
 530
 531        /* Check if there was a TCP timestamp; if there was, it will
 532         * always be 12 bytes long: nop nop kind length echo val.
 533         *
 534         * Otherwise the FW would have closed the aggregation.
 535         */
 536        if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 537                hdrs_len += TPA_TSTAMP_OPT_LEN;
 538
 539        skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
 540
 541        /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
 542         * to skb_shinfo(skb)->gso_segs
 543         */
 544        NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
 545}
 546
 547static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 548                              u16 index, gfp_t gfp_mask)
 549{
 550        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
 551        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
 552        struct bnx2x_alloc_pool *pool = &fp->page_pool;
 553        dma_addr_t mapping;
 554
 555        if (!pool->page) {
 556                pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
 557                if (unlikely(!pool->page))
 558                        return -ENOMEM;
 559
 560                pool->offset = 0;
 561        }
 562
 563        mapping = dma_map_page(&bp->pdev->dev, pool->page,
 564                               pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 565        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 566                BNX2X_ERR("Can't map sge\n");
 567                return -ENOMEM;
 568        }
 569
 570        sw_buf->page = pool->page;
 571        sw_buf->offset = pool->offset;
 572
 573        dma_unmap_addr_set(sw_buf, mapping, mapping);
 574
 575        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 576        sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 577
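            /* Advance within the current page. Keep an extra reference while
             * the page can still hold another SGE; otherwise forget it so a
             * fresh page is allocated on the next call.
             */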
 578        pool->offset += SGE_PAGE_SIZE;
 579        if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
 580                get_page(pool->page);
 581        else
 582                pool->page = NULL;
 583        return 0;
 584}
 585
 586static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 587                               struct bnx2x_agg_info *tpa_info,
 588                               u16 pages,
 589                               struct sk_buff *skb,
 590                               struct eth_end_agg_rx_cqe *cqe,
 591                               u16 cqe_idx)
 592{
 593        struct sw_rx_page *rx_pg, old_rx_pg;
 594        u32 i, frag_len, frag_size;
 595        int err, j, frag_id = 0;
 596        u16 len_on_bd = tpa_info->len_on_bd;
 597        u16 full_page = 0, gro_size = 0;
 598
 599        frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 600
 601        if (fp->mode == TPA_MODE_GRO) {
 602                gro_size = tpa_info->gro_size;
 603                full_page = tpa_info->full_page;
 604        }
 605
 606        /* This is needed in order to enable forwarding support */
 607        if (frag_size)
 608                bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
 609                                     le16_to_cpu(cqe->pkt_len),
 610                                     le16_to_cpu(cqe->num_of_coalesced_segs));
 611
 612#ifdef BNX2X_STOP_ON_ERROR
 613        if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
 614                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 615                          pages, cqe_idx);
 616                BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 617                bnx2x_panic();
 618                return -EINVAL;
 619        }
 620#endif
 621
 622        /* Run through the SGL and compose the fragmented skb */
 623        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
 624                u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 625
 626                /* FW gives the indices of the SGE as if the ring is an array
 627                   (meaning that "next" element will consume 2 indices) */
 628                if (fp->mode == TPA_MODE_GRO)
 629                        frag_len = min_t(u32, frag_size, (u32)full_page);
 630                else /* LRO */
 631                        frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
 632
 633                rx_pg = &fp->rx_page_ring[sge_idx];
 634                old_rx_pg = *rx_pg;
 635
 636                /* If we fail to allocate a substitute page, we simply stop
 637                   where we are and drop the whole packet */
 638                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
 639                if (unlikely(err)) {
 640                        bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 641                        return err;
 642                }
 643
 644                dma_unmap_page(&bp->pdev->dev,
 645                               dma_unmap_addr(&old_rx_pg, mapping),
 646                               SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 647                /* Add one frag and update the appropriate fields in the skb */
 648                if (fp->mode == TPA_MODE_LRO)
 649                        skb_fill_page_desc(skb, j, old_rx_pg.page,
 650                                           old_rx_pg.offset, frag_len);
 651                else { /* GRO */
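                            /* GRO: carve the SGE page into gro_size-sized frags;
                             * every frag beyond the first needs its own page
                             * reference, hence the get_page() below.
                             */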
 652                        int rem;
 653                        int offset = 0;
 654                        for (rem = frag_len; rem > 0; rem -= gro_size) {
 655                                int len = rem > gro_size ? gro_size : rem;
 656                                skb_fill_page_desc(skb, frag_id++,
 657                                                   old_rx_pg.page,
 658                                                   old_rx_pg.offset + offset,
 659                                                   len);
 660                                if (offset)
 661                                        get_page(old_rx_pg.page);
 662                                offset += len;
 663                        }
 664                }
 665
 666                skb->data_len += frag_len;
 667                skb->truesize += SGE_PAGES;
 668                skb->len += frag_len;
 669
 670                frag_size -= frag_len;
 671        }
 672
 673        return 0;
 674}
 675
 676static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 677{
 678        if (fp->rx_frag_size)
 679                skb_free_frag(data);
 680        else
 681                kfree(data);
 682}
 683
 684static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
 685{
 686        if (fp->rx_frag_size) {
 687                /* GFP_KERNEL allocations are used only during initialization */
 688                if (unlikely(gfpflags_allow_blocking(gfp_mask)))
 689                        return (void *)__get_free_page(gfp_mask);
 690
 691                return napi_alloc_frag(fp->rx_frag_size);
 692        }
 693
 694        return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
 695}
 696
 697#ifdef CONFIG_INET
 698static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
 699{
 700        const struct iphdr *iph = ip_hdr(skb);
 701        struct tcphdr *th;
 702
 703        skb_set_transport_header(skb, sizeof(struct iphdr));
 704        th = tcp_hdr(skb);
 705
 706        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
 707                                  iph->saddr, iph->daddr, 0);
 708}
 709
 710static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
 711{
 712        struct ipv6hdr *iph = ipv6_hdr(skb);
 713        struct tcphdr *th;
 714
 715        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 716        th = tcp_hdr(skb);
 717
 718        th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 719                                  &iph->saddr, &iph->daddr, 0);
 720}
 721
 722static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
 723                            void (*gro_func)(struct bnx2x*, struct sk_buff*))
 724{
 725        skb_reset_network_header(skb);
 726        gro_func(bp, skb);
 727        tcp_gro_complete(skb);
 728}
 729#endif
 730
 731static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 732                               struct sk_buff *skb)
 733{
 734#ifdef CONFIG_INET
 735        if (skb_shinfo(skb)->gso_size) {
 736                switch (be16_to_cpu(skb->protocol)) {
 737                case ETH_P_IP:
 738                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
 739                        break;
 740                case ETH_P_IPV6:
 741                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
 742                        break;
 743                default:
 744                        netdev_WARN_ONCE(bp->dev,
 745                                         "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
 746                                         be16_to_cpu(skb->protocol));
 747                }
 748        }
 749#endif
 750        skb_record_rx_queue(skb, fp->rx_queue);
 751        napi_gro_receive(&fp->napi, skb);
 752}
 753
 754static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 755                           struct bnx2x_agg_info *tpa_info,
 756                           u16 pages,
 757                           struct eth_end_agg_rx_cqe *cqe,
 758                           u16 cqe_idx)
 759{
 760        struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 761        u8 pad = tpa_info->placement_offset;
 762        u16 len = tpa_info->len_on_bd;
 763        struct sk_buff *skb = NULL;
 764        u8 *new_data, *data = rx_buf->data;
 765        u8 old_tpa_state = tpa_info->tpa_state;
 766
 767        tpa_info->tpa_state = BNX2X_TPA_STOP;
 768
 769        /* If there was an error during the handling of the TPA_START -
 770         * drop this aggregation.
 771         */
 772        if (old_tpa_state == BNX2X_TPA_ERROR)
 773                goto drop;
 774
 775        /* Try to allocate the new data */
 776        new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
 777        /* Unmap skb in the pool anyway, as we are going to change
 778           pool entry status to BNX2X_TPA_STOP even if new skb allocation
 779           fails. */
 780        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 781                         fp->rx_buf_size, DMA_FROM_DEVICE);
 782        if (likely(new_data))
 783                skb = build_skb(data, fp->rx_frag_size);
 784
 785        if (likely(skb)) {
 786#ifdef BNX2X_STOP_ON_ERROR
 787                if (pad + len > fp->rx_buf_size) {
 788                        BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
 789                                  pad, len, fp->rx_buf_size);
 790                        bnx2x_panic();
 791                        return;
 792                }
 793#endif
 794
 795                skb_reserve(skb, pad + NET_SKB_PAD);
 796                skb_put(skb, len);
 797                skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
 798
 799                skb->protocol = eth_type_trans(skb, bp->dev);
 800                skb->ip_summed = CHECKSUM_UNNECESSARY;
 801
 802                if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
 803                                         skb, cqe, cqe_idx)) {
 804                        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 805                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
 806                        bnx2x_gro_receive(bp, fp, skb);
 807                } else {
 808                        DP(NETIF_MSG_RX_STATUS,
 809                           "Failed to allocate new pages - dropping packet!\n");
 810                        dev_kfree_skb_any(skb);
 811                }
 812
 813                /* put new data in bin */
 814                rx_buf->data = new_data;
 815
 816                return;
 817        }
 818        if (new_data)
 819                bnx2x_frag_free(fp, new_data);
 820drop:
 821        /* drop the packet and keep the buffer in the bin */
 822        DP(NETIF_MSG_RX_STATUS,
 823           "Failed to allocate or map a new skb - dropping packet!\n");
 824        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
 825}
 826
 827static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 828                               u16 index, gfp_t gfp_mask)
 829{
 830        u8 *data;
 831        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
 832        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 833        dma_addr_t mapping;
 834
 835        data = bnx2x_frag_alloc(fp, gfp_mask);
 836        if (unlikely(data == NULL))
 837                return -ENOMEM;
 838
 839        mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
 840                                 fp->rx_buf_size,
 841                                 DMA_FROM_DEVICE);
 842        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 843                bnx2x_frag_free(fp, data);
 844                BNX2X_ERR("Can't map rx data\n");
 845                return -ENOMEM;
 846        }
 847
 848        rx_buf->data = data;
 849        dma_unmap_addr_set(rx_buf, mapping, mapping);
 850
 851        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 852        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 853
 854        return 0;
 855}
 856
 857static
 858void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
 859                                 struct bnx2x_fastpath *fp,
 860                                 struct bnx2x_eth_q_stats *qstats)
 861{
 862        /* Do nothing if no L4 csum validation was done.
 863         * We do not check whether IP csum was validated. For IPv4 we assume
 864         * that if the card got as far as validating the L4 csum, it also
 865         * validated the IP csum. IPv6 has no IP csum.
 866         */
 867        if (cqe->fast_path_cqe.status_flags &
 868            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
 869                return;
 870
 871        /* If L4 validation was done, check if an error was found. */
 872
 873        if (cqe->fast_path_cqe.type_error_flags &
 874            (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
 875             ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
 876                qstats->hw_csum_err++;
 877        else
 878                skb->ip_summed = CHECKSUM_UNNECESSARY;
 879}
 880
 881static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 882{
 883        struct bnx2x *bp = fp->bp;
 884        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 885        u16 sw_comp_cons, sw_comp_prod;
 886        int rx_pkt = 0;
 887        union eth_rx_cqe *cqe;
 888        struct eth_fast_path_rx_cqe *cqe_fp;
 889
 890#ifdef BNX2X_STOP_ON_ERROR
 891        if (unlikely(bp->panic))
 892                return 0;
 893#endif
 894        if (budget <= 0)
 895                return rx_pkt;
 896
 897        bd_cons = fp->rx_bd_cons;
 898        bd_prod = fp->rx_bd_prod;
 899        bd_prod_fw = bd_prod;
 900        sw_comp_cons = fp->rx_comp_cons;
 901        sw_comp_prod = fp->rx_comp_prod;
 902
 903        comp_ring_cons = RCQ_BD(sw_comp_cons);
 904        cqe = &fp->rx_comp_ring[comp_ring_cons];
 905        cqe_fp = &cqe->fast_path_cqe;
 906
 907        DP(NETIF_MSG_RX_STATUS,
 908           "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
 909
 910        while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
 911                struct sw_rx_bd *rx_buf = NULL;
 912                struct sk_buff *skb;
 913                u8 cqe_fp_flags;
 914                enum eth_rx_cqe_type cqe_fp_type;
 915                u16 len, pad, queue;
 916                u8 *data;
 917                u32 rxhash;
 918                enum pkt_hash_types rxhash_type;
 919
 920#ifdef BNX2X_STOP_ON_ERROR
 921                if (unlikely(bp->panic))
 922                        return 0;
 923#endif
 924
 925                bd_prod = RX_BD(bd_prod);
 926                bd_cons = RX_BD(bd_cons);
 927
 928                /* A rmb() is required to ensure that the CQE is not read
 929                 * before it is written by the adapter DMA.  PCI ordering
 930                 * rules will make sure the other fields are written before
 931                 * the marker at the end of struct eth_fast_path_rx_cqe
 932                 * but without rmb() a weakly ordered processor can process
 933                 * stale data.  Without the barrier the TPA state machine might
 934                 * enter an inconsistent state and the kernel stack might be
 935                 * provided with an incorrect packet description - these lead
 936                 * to various kernel crashes.
 937                 */
 938                rmb();
 939
 940                cqe_fp_flags = cqe_fp->type_error_flags;
 941                cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 942
 943                DP(NETIF_MSG_RX_STATUS,
 944                   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
 945                   CQE_TYPE(cqe_fp_flags),
 946                   cqe_fp_flags, cqe_fp->status_flags,
 947                   le32_to_cpu(cqe_fp->rss_hash_result),
 948                   le16_to_cpu(cqe_fp->vlan_tag),
 949                   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
 950
 951                /* is this a slowpath msg? */
 952                if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 953                        bnx2x_sp_event(fp, cqe);
 954                        goto next_cqe;
 955                }
 956
 957                rx_buf = &fp->rx_buf_ring[bd_cons];
 958                data = rx_buf->data;
 959
 960                if (!CQE_TYPE_FAST(cqe_fp_type)) {
 961                        struct bnx2x_agg_info *tpa_info;
 962                        u16 frag_size, pages;
 963#ifdef BNX2X_STOP_ON_ERROR
 964                        /* sanity check */
 965                        if (fp->mode == TPA_MODE_DISABLED &&
 966                            (CQE_TYPE_START(cqe_fp_type) ||
 967                             CQE_TYPE_STOP(cqe_fp_type)))
 968                                BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
 969                                          CQE_TYPE(cqe_fp_type));
 970#endif
 971
 972                        if (CQE_TYPE_START(cqe_fp_type)) {
 973                                u16 queue = cqe_fp->queue_index;
 974                                DP(NETIF_MSG_RX_STATUS,
 975                                   "calling tpa_start on queue %d\n",
 976                                   queue);
 977
 978                                bnx2x_tpa_start(fp, queue,
 979                                                bd_cons, bd_prod,
 980                                                cqe_fp);
 981
 982                                goto next_rx;
 983                        }
 984                        queue = cqe->end_agg_cqe.queue_index;
 985                        tpa_info = &fp->tpa_info[queue];
 986                        DP(NETIF_MSG_RX_STATUS,
 987                           "calling tpa_stop on queue %d\n",
 988                           queue);
 989
 990                        frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
 991                                    tpa_info->len_on_bd;
 992
 993                        if (fp->mode == TPA_MODE_GRO)
 994                                pages = (frag_size + tpa_info->full_page - 1) /
 995                                         tpa_info->full_page;
 996                        else
 997                                pages = SGE_PAGE_ALIGN(frag_size) >>
 998                                        SGE_PAGE_SHIFT;
 999
1000                        bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1001                                       &cqe->end_agg_cqe, comp_ring_cons);
1002#ifdef BNX2X_STOP_ON_ERROR
1003                        if (bp->panic)
1004                                return 0;
1005#endif
1006
1007                        bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1008                        goto next_cqe;
1009                }
1010                /* non TPA */
1011                len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1012                pad = cqe_fp->placement_offset;
1013                dma_sync_single_for_cpu(&bp->pdev->dev,
1014                                        dma_unmap_addr(rx_buf, mapping),
1015                                        pad + RX_COPY_THRESH,
1016                                        DMA_FROM_DEVICE);
1017                pad += NET_SKB_PAD;
1018                prefetch(data + pad); /* speedup eth_type_trans() */
1019                /* is this an error packet? */
1020                if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1021                        DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1022                           "ERROR  flags %x  rx packet %u\n",
1023                           cqe_fp_flags, sw_comp_cons);
1024                        bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1025                        goto reuse_rx;
1026                }
1027
1028                /* Since we don't have a jumbo ring,
1029                 * copy small packets if mtu > 1500
1030                 */
1031                if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1032                    (len <= RX_COPY_THRESH)) {
1033                        skb = napi_alloc_skb(&fp->napi, len);
1034                        if (skb == NULL) {
1035                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1036                                   "ERROR  packet dropped because of alloc failure\n");
1037                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1038                                goto reuse_rx;
1039                        }
1040                        memcpy(skb->data, data + pad, len);
1041                        bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1042                } else {
1043                        if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1044                                                       GFP_ATOMIC) == 0)) {
1045                                dma_unmap_single(&bp->pdev->dev,
1046                                                 dma_unmap_addr(rx_buf, mapping),
1047                                                 fp->rx_buf_size,
1048                                                 DMA_FROM_DEVICE);
1049                                skb = build_skb(data, fp->rx_frag_size);
1050                                if (unlikely(!skb)) {
1051                                        bnx2x_frag_free(fp, data);
1052                                        bnx2x_fp_qstats(bp, fp)->
1053                                                        rx_skb_alloc_failed++;
1054                                        goto next_rx;
1055                                }
1056                                skb_reserve(skb, pad);
1057                        } else {
1058                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1059                                   "ERROR  packet dropped because of alloc failure\n");
1060                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1061reuse_rx:
1062                                bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1063                                goto next_rx;
1064                        }
1065                }
1066
1067                skb_put(skb, len);
1068                skb->protocol = eth_type_trans(skb, bp->dev);
1069
1070                /* Set Toeplitz hash for a non-LRO skb */
1071                rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1072                skb_set_hash(skb, rxhash, rxhash_type);
1073
1074                skb_checksum_none_assert(skb);
1075
1076                if (bp->dev->features & NETIF_F_RXCSUM)
1077                        bnx2x_csum_validate(skb, cqe, fp,
1078                                            bnx2x_fp_qstats(bp, fp));
1079
1080                skb_record_rx_queue(skb, fp->rx_queue);
1081
1082                /* Check if this packet was timestamped */
1083                if (unlikely(cqe->fast_path_cqe.type_error_flags &
1084                             (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1085                        bnx2x_set_rx_ts(bp, skb);
1086
1087                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1088                    PARSING_FLAGS_VLAN)
1089                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1090                                               le16_to_cpu(cqe_fp->vlan_tag));
1091
1092                napi_gro_receive(&fp->napi, skb);
1093next_rx:
1094                rx_buf->data = NULL;
1095
1096                bd_cons = NEXT_RX_IDX(bd_cons);
1097                bd_prod = NEXT_RX_IDX(bd_prod);
1098                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1099                rx_pkt++;
1100next_cqe:
1101                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1102                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1103
1104                /* mark CQE as free */
1105                BNX2X_SEED_CQE(cqe_fp);
1106
1107                if (rx_pkt == budget)
1108                        break;
1109
1110                comp_ring_cons = RCQ_BD(sw_comp_cons);
1111                cqe = &fp->rx_comp_ring[comp_ring_cons];
1112                cqe_fp = &cqe->fast_path_cqe;
1113        } /* while */
1114
1115        fp->rx_bd_cons = bd_cons;
1116        fp->rx_bd_prod = bd_prod_fw;
1117        fp->rx_comp_cons = sw_comp_cons;
1118        fp->rx_comp_prod = sw_comp_prod;
1119
1120        /* Update producers */
1121        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1122                             fp->rx_sge_prod);
1123
1124        return rx_pkt;
1125}
1126
1127static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1128{
1129        struct bnx2x_fastpath *fp = fp_cookie;
1130        struct bnx2x *bp = fp->bp;
1131        u8 cos;
1132
1133        DP(NETIF_MSG_INTR,
1134           "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1135           fp->index, fp->fw_sb_id, fp->igu_sb_id);
1136
1137        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1138
1139#ifdef BNX2X_STOP_ON_ERROR
1140        if (unlikely(bp->panic))
1141                return IRQ_HANDLED;
1142#endif
1143
1144        /* Handle Rx and Tx according to MSI-X vector */
1145        for_each_cos_in_tx_queue(fp, cos)
1146                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1147
1148        prefetch(&fp->sb_running_index[SM_RX_ID]);
1149        napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1150
1151        return IRQ_HANDLED;
1152}
1153
1154/* HW Lock for shared dual port PHYs */
1155void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1156{
1157        mutex_lock(&bp->port.phy_mutex);
1158
1159        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1160}
1161
1162void bnx2x_release_phy_lock(struct bnx2x *bp)
1163{
1164        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1165
1166        mutex_unlock(&bp->port.phy_mutex);
1167}
1168
1169/* calculates MF speed according to current line speed and MF configuration */
1170u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1171{
1172        u16 line_speed = bp->link_vars.line_speed;
1173        if (IS_MF(bp)) {
1174                u16 maxCfg = bnx2x_extract_max_cfg(bp,
1175                                                   bp->mf_config[BP_VN(bp)]);
1176
1177                /* Calculate the current MAX line speed limit for the MF
1178                 * devices
1179                 */
1180                if (IS_MF_PERCENT_BW(bp))
1181                        line_speed = (line_speed * maxCfg) / 100;
1182                else { /* SD mode */
1183                        u16 vn_max_rate = maxCfg * 100;
1184
1185                        if (vn_max_rate < line_speed)
1186                                line_speed = vn_max_rate;
1187                }
1188        }
1189
1190        return line_speed;
1191}
1192
1193/**
1194 * bnx2x_fill_report_data - fill link report data to report
1195 *
1196 * @bp:         driver handle
1197 * @data:       link state to update
1198 *
1199 * It uses non-atomic bit operations because it is called under the mutex.
1200 */
1201static void bnx2x_fill_report_data(struct bnx2x *bp,
1202                                   struct bnx2x_link_report_data *data)
1203{
1204        memset(data, 0, sizeof(*data));
1205
1206        if (IS_PF(bp)) {
1207                /* Fill the report data: effective line speed */
1208                data->line_speed = bnx2x_get_mf_speed(bp);
1209
1210                /* Link is down */
1211                if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1212                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1213                                  &data->link_report_flags);
1214
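                    /* With no ETH queues nothing can carry traffic, so report
                     * the link as down in that case as well.
                     */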
1215                if (!BNX2X_NUM_ETH_QUEUES(bp))
1216                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1217                                  &data->link_report_flags);
1218
1219                /* Full DUPLEX */
1220                if (bp->link_vars.duplex == DUPLEX_FULL)
1221                        __set_bit(BNX2X_LINK_REPORT_FD,
1222                                  &data->link_report_flags);
1223
1224                /* Rx Flow Control is ON */
1225                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1226                        __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1227                                  &data->link_report_flags);
1228
1229                /* Tx Flow Control is ON */
1230                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1231                        __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1232                                  &data->link_report_flags);
1233        } else { /* VF */
1234                *data = bp->vf_link_vars;
1235        }
1236}
1237
1238/**
1239 * bnx2x_link_report - report link status to OS.
1240 *
1241 * @bp:         driver handle
1242 *
1243 * Calls __bnx2x_link_report() under the same locking scheme as
1244 * the link/PHY state management code to ensure consistent link
1245 * reporting.
1246 */
1247
1248void bnx2x_link_report(struct bnx2x *bp)
1249{
1250        bnx2x_acquire_phy_lock(bp);
1251        __bnx2x_link_report(bp);
1252        bnx2x_release_phy_lock(bp);
1253}
1254
1255/**
1256 * __bnx2x_link_report - report link status to OS.
1257 *
1258 * @bp:         driver handle
1259 *
1260 * Non-atomic implementation.
1261 * Should be called under the phy_lock.
1262 */
1263void __bnx2x_link_report(struct bnx2x *bp)
1264{
1265        struct bnx2x_link_report_data cur_data;
1266
1267        if (bp->force_link_down) {
1268                bp->link_vars.link_up = 0;
1269                return;
1270        }
1271
1272        /* reread mf_cfg */
1273        if (IS_PF(bp) && !CHIP_IS_E1(bp))
1274                bnx2x_read_mf_cfg(bp);
1275
1276        /* Read the current link report info */
1277        bnx2x_fill_report_data(bp, &cur_data);
1278
1279        /* Don't report link down or exactly the same link status twice */
1280        if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1281            (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1282                      &bp->last_reported_link.link_report_flags) &&
1283             test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1284                      &cur_data.link_report_flags)))
1285                return;
1286
1287        bp->link_cnt++;
1288
1289        /* We are going to report new link parameters now -
1290         * remember the current data for the next time.
1291         */
1292        memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1293
1294        /* propagate status to VFs */
1295        if (IS_PF(bp))
1296                bnx2x_iov_link_update(bp);
1297
1298        if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1299                     &cur_data.link_report_flags)) {
1300                netif_carrier_off(bp->dev);
1301                netdev_err(bp->dev, "NIC Link is Down\n");
1302                return;
1303        } else {
1304                const char *duplex;
1305                const char *flow;
1306
1307                netif_carrier_on(bp->dev);
1308
1309                if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1310                                       &cur_data.link_report_flags))
1311                        duplex = "full";
1312                else
1313                        duplex = "half";
1314
1315                /* Handle the FC flags at the end so that only these flags can
1316                 * possibly be set. This way we can easily check whether any
1317                 * flow control is enabled.
1318                 */
1319                if (cur_data.link_report_flags) {
1320                        if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1321                                     &cur_data.link_report_flags)) {
1322                                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1323                                     &cur_data.link_report_flags))
1324                                        flow = "ON - receive & transmit";
1325                                else
1326                                        flow = "ON - receive";
1327                        } else {
1328                                flow = "ON - transmit";
1329                        }
1330                } else {
1331                        flow = "none";
1332                }
1333                netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1334                            cur_data.line_speed, duplex, flow);
1335        }
1336}
1337
1338static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1339{
1340        int i;
1341
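            /* The last two entries of each SGE ring page are reserved; the
             * next-page pointer is written there so the pages form a loop,
             * with the last page pointing back to the first one.
             */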
1342        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1343                struct eth_rx_sge *sge;
1344
1345                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1346                sge->addr_hi =
1347                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1348                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1349
1350                sge->addr_lo =
1351                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1352                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1353        }
1354}
1355
1356static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1357                                struct bnx2x_fastpath *fp, int last)
1358{
1359        int i;
1360
1361        for (i = 0; i < last; i++) {
1362                struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1363                struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1364                u8 *data = first_buf->data;
1365
1366                if (data == NULL) {
1367                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1368                        continue;
1369                }
1370                if (tpa_info->tpa_state == BNX2X_TPA_START)
1371                        dma_unmap_single(&bp->pdev->dev,
1372                                         dma_unmap_addr(first_buf, mapping),
1373                                         fp->rx_buf_size, DMA_FROM_DEVICE);
1374                bnx2x_frag_free(fp, data);
1375                first_buf->data = NULL;
1376        }
1377}
1378
1379void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1380{
1381        int j;
1382
1383        for_each_rx_queue_cnic(bp, j) {
1384                struct bnx2x_fastpath *fp = &bp->fp[j];
1385
1386                fp->rx_bd_cons = 0;
1387
1388                /* Activate BD ring */
1389                /* Warning!
1390                 * This will generate an interrupt (to the TSTORM);
1391                 * it must only be done after the chip is initialized.
1392                 */
1393                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1394                                     fp->rx_sge_prod);
1395        }
1396}
1397
1398void bnx2x_init_rx_rings(struct bnx2x *bp)
1399{
1400        int func = BP_FUNC(bp);
1401        u16 ring_prod;
1402        int i, j;
1403
1404        /* Allocate TPA resources */
1405        for_each_eth_queue(bp, j) {
1406                struct bnx2x_fastpath *fp = &bp->fp[j];
1407
1408                DP(NETIF_MSG_IFUP,
1409                   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1410
1411                if (fp->mode != TPA_MODE_DISABLED) {
1412                        /* Fill the per-aggregation pool */
1413                        for (i = 0; i < MAX_AGG_QS(bp); i++) {
1414                                struct bnx2x_agg_info *tpa_info =
1415                                        &fp->tpa_info[i];
1416                                struct sw_rx_bd *first_buf =
1417                                        &tpa_info->first_buf;
1418
1419                                first_buf->data =
1420                                        bnx2x_frag_alloc(fp, GFP_KERNEL);
1421                                if (!first_buf->data) {
1422                                        BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1423                                                  j);
1424                                        bnx2x_free_tpa_pool(bp, fp, i);
1425                                        fp->mode = TPA_MODE_DISABLED;
1426                                        break;
1427                                }
1428                                dma_unmap_addr_set(first_buf, mapping, 0);
1429                                tpa_info->tpa_state = BNX2X_TPA_STOP;
1430                        }
1431
1432                        /* "next page" elements initialization */
1433                        bnx2x_set_next_page_sgl(fp);
1434
1435                        /* set SGEs bit mask */
1436                        bnx2x_init_sge_ring_bit_mask(fp);
1437
1438                        /* Allocate SGEs and initialize the ring elements */
1439                        for (i = 0, ring_prod = 0;
1440                             i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1441
1442                                if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1443                                                       GFP_KERNEL) < 0) {
1444                                        BNX2X_ERR("was only able to allocate %d rx sges\n",
1445                                                  i);
1446                                        BNX2X_ERR("disabling TPA for queue[%d]\n",
1447                                                  j);
1448                                        /* Cleanup already allocated elements */
1449                                        bnx2x_free_rx_sge_range(bp, fp,
1450                                                                ring_prod);
1451                                        bnx2x_free_tpa_pool(bp, fp,
1452                                                            MAX_AGG_QS(bp));
1453                                        fp->mode = TPA_MODE_DISABLED;
1454                                        ring_prod = 0;
1455                                        break;
1456                                }
1457                                ring_prod = NEXT_SGE_IDX(ring_prod);
1458                        }
1459
1460                        fp->rx_sge_prod = ring_prod;
1461                }
1462        }
1463
1464        for_each_eth_queue(bp, j) {
1465                struct bnx2x_fastpath *fp = &bp->fp[j];
1466
1467                fp->rx_bd_cons = 0;
1468
1469                /* Activate BD ring */
1470                /* Warning!
1471                 * This will generate an interrupt (to the TSTORM);
1472                 * it must only be done after the chip is initialized.
1473                 */
1474                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1475                                     fp->rx_sge_prod);
1476
1477                if (j != 0)
1478                        continue;
1479
1480                if (CHIP_IS_E1(bp)) {
1481                        REG_WR(bp, BAR_USTRORM_INTMEM +
1482                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1483                               U64_LO(fp->rx_comp_mapping));
1484                        REG_WR(bp, BAR_USTRORM_INTMEM +
1485                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1486                               U64_HI(fp->rx_comp_mapping));
1487                }
1488        }
1489}
1490
1491static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1492{
1493        u8 cos;
1494        struct bnx2x *bp = fp->bp;
1495
1496        for_each_cos_in_tx_queue(fp, cos) {
1497                struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1498                unsigned pkts_compl = 0, bytes_compl = 0;
1499
1500                u16 sw_prod = txdata->tx_pkt_prod;
1501                u16 sw_cons = txdata->tx_pkt_cons;
1502
1503                while (sw_cons != sw_prod) {
1504                        bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1505                                          &pkts_compl, &bytes_compl);
1506                        sw_cons++;
1507                }
1508
1509                netdev_tx_reset_queue(
1510                        netdev_get_tx_queue(bp->dev,
1511                                            txdata->txq_index));
1512        }
1513}
1514
1515static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1516{
1517        int i;
1518
1519        for_each_tx_queue_cnic(bp, i) {
1520                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1521        }
1522}
1523
1524static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1525{
1526        int i;
1527
1528        for_each_eth_queue(bp, i) {
1529                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1530        }
1531}
1532
1533static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1534{
1535        struct bnx2x *bp = fp->bp;
1536        int i;
1537
1538        /* ring wasn't allocated */
1539        if (fp->rx_buf_ring == NULL)
1540                return;
1541
1542        for (i = 0; i < NUM_RX_BD; i++) {
1543                struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1544                u8 *data = rx_buf->data;
1545
1546                if (data == NULL)
1547                        continue;
1548                dma_unmap_single(&bp->pdev->dev,
1549                                 dma_unmap_addr(rx_buf, mapping),
1550                                 fp->rx_buf_size, DMA_FROM_DEVICE);
1551
1552                rx_buf->data = NULL;
1553                bnx2x_frag_free(fp, data);
1554        }
1555}
1556
1557static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1558{
1559        int j;
1560
1561        for_each_rx_queue_cnic(bp, j) {
1562                bnx2x_free_rx_bds(&bp->fp[j]);
1563        }
1564}
1565
1566static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1567{
1568        int j;
1569
1570        for_each_eth_queue(bp, j) {
1571                struct bnx2x_fastpath *fp = &bp->fp[j];
1572
1573                bnx2x_free_rx_bds(fp);
1574
1575                if (fp->mode != TPA_MODE_DISABLED)
1576                        bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1577        }
1578}
1579
1580static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1581{
1582        bnx2x_free_tx_skbs_cnic(bp);
1583        bnx2x_free_rx_skbs_cnic(bp);
1584}
1585
1586void bnx2x_free_skbs(struct bnx2x *bp)
1587{
1588        bnx2x_free_tx_skbs(bp);
1589        bnx2x_free_rx_skbs(bp);
1590}
1591
1592void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1593{
1594        /* load old values */
1595        u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1596
1597        if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1598                /* leave all but MAX value */
1599                mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1600
1601                /* set new MAX value */
1602                mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1603                                & FUNC_MF_CFG_MAX_BW_MASK;
1604
1605                bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1606        }
1607}
1608
1609/**
1610 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1611 *
1612 * @bp:         driver handle
1613 * @nvecs:      number of vectors to be released
1614 */
1615static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1616{
1617        int i, offset = 0;
1618
1619        if (nvecs == offset)
1620                return;
1621
1622        /* VFs don't have a default SB */
1623        if (IS_PF(bp)) {
1624                free_irq(bp->msix_table[offset].vector, bp->dev);
1625                DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1626                   bp->msix_table[offset].vector);
1627                offset++;
1628        }
1629
1630        if (CNIC_SUPPORT(bp)) {
1631                if (nvecs == offset)
1632                        return;
1633                offset++;
1634        }
1635
1636        for_each_eth_queue(bp, i) {
1637                if (nvecs == offset)
1638                        return;
1639                DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1640                   i, bp->msix_table[offset].vector);
1641
1642                free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1643        }
1644}
1645
1646void bnx2x_free_irq(struct bnx2x *bp)
1647{
1648        if (bp->flags & USING_MSIX_FLAG &&
1649            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1650                int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1651
1652                /* vfs don't have a default status block */
1653                if (IS_PF(bp))
1654                        nvecs++;
1655
1656                bnx2x_free_msix_irqs(bp, nvecs);
1657        } else {
1658                free_irq(bp->dev->irq, bp->dev);
1659        }
1660}
1661
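    /* Request MSI-X vectors: one for the slowpath status block (PF only), one
     * for CNIC if supported, and one per ETH queue. For example (illustrative
     * numbers), a PF with CNIC support and 8 ETH queues asks for
     * 1 + 1 + 8 = 10 vectors; if fewer are granted, the number of ETH queues
     * is reduced to match, and a single granted vector means one ETH queue
     * served by that vector alone.
     */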
1662int bnx2x_enable_msix(struct bnx2x *bp)
1663{
1664        int msix_vec = 0, i, rc;
1665
1666        /* VFs don't have a default status block */
1667        if (IS_PF(bp)) {
1668                bp->msix_table[msix_vec].entry = msix_vec;
1669                BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1670                               bp->msix_table[0].entry);
1671                msix_vec++;
1672        }
1673
1674        /* CNIC requires an MSI-X vector for itself */
1675        if (CNIC_SUPPORT(bp)) {
1676                bp->msix_table[msix_vec].entry = msix_vec;
1677                BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1678                               msix_vec, bp->msix_table[msix_vec].entry);
1679                msix_vec++;
1680        }
1681
1682        /* We need separate vectors for ETH queues only (not FCoE) */
1683        for_each_eth_queue(bp, i) {
1684                bp->msix_table[msix_vec].entry = msix_vec;
1685                BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1686                               msix_vec, msix_vec, i);
1687                msix_vec++;
1688        }
1689
1690        DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1691           msix_vec);
1692
1693        rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1694                                   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1695        /*
1696         * reconfigure number of tx/rx queues according to available
1697         * MSI-X vectors
1698         */
1699        if (rc == -ENOSPC) {
1700                /* Get by with single vector */
1701                rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1702                if (rc < 0) {
1703                        BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1704                                       rc);
1705                        goto no_msix;
1706                }
1707
1708                BNX2X_DEV_INFO("Using single MSI-X vector\n");
1709                bp->flags |= USING_SINGLE_MSIX_FLAG;
1710
1711                BNX2X_DEV_INFO("set number of queues to 1\n");
1712                bp->num_ethernet_queues = 1;
1713                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1714        } else if (rc < 0) {
1715                BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1716                goto no_msix;
1717        } else if (rc < msix_vec) {
1718                /* how many fewer vectors will we have? */
1719                int diff = msix_vec - rc;
1720
1721                BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1722
1723                /*
1724                 * decrease number of queues by number of unallocated entries
1725                 */
1726                bp->num_ethernet_queues -= diff;
1727                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1728
1729                BNX2X_DEV_INFO("New queue configuration set: %d\n",
1730                               bp->num_queues);
1731        }
1732
1733        bp->flags |= USING_MSIX_FLAG;
1734
1735        return 0;
1736
1737no_msix:
1738        /* fall back to INTx if there is not enough memory */
1739        if (rc == -ENOMEM)
1740                bp->flags |= DISABLE_MSI_FLAG;
1741
1742        return rc;
1743}
1744
1745static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1746{
1747        int i, rc, offset = 0;
1748
1749        /* no default status block for vf */
1750        if (IS_PF(bp)) {
1751                rc = request_irq(bp->msix_table[offset++].vector,
1752                                 bnx2x_msix_sp_int, 0,
1753                                 bp->dev->name, bp->dev);
1754                if (rc) {
1755                        BNX2X_ERR("request sp irq failed\n");
1756                        return -EBUSY;
1757                }
1758        }
1759
1760        if (CNIC_SUPPORT(bp))
1761                offset++;
1762
1763        for_each_eth_queue(bp, i) {
1764                struct bnx2x_fastpath *fp = &bp->fp[i];
1765                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1766                         bp->dev->name, i);
1767
1768                rc = request_irq(bp->msix_table[offset].vector,
1769                                 bnx2x_msix_fp_int, 0, fp->name, fp);
1770                if (rc) {
1771                        BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1772                              bp->msix_table[offset].vector, rc);
1773                        bnx2x_free_msix_irqs(bp, offset);
1774                        return -EBUSY;
1775                }
1776
1777                offset++;
1778        }
1779
1780        i = BNX2X_NUM_ETH_QUEUES(bp);
1781        if (IS_PF(bp)) {
1782                offset = 1 + CNIC_SUPPORT(bp);
1783                netdev_info(bp->dev,
1784                            "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1785                            bp->msix_table[0].vector,
1786                            0, bp->msix_table[offset].vector,
1787                            i - 1, bp->msix_table[offset + i - 1].vector);
1788        } else {
1789                offset = CNIC_SUPPORT(bp);
1790                netdev_info(bp->dev,
1791                            "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1792                            0, bp->msix_table[offset].vector,
1793                            i - 1, bp->msix_table[offset + i - 1].vector);
1794        }
1795        return 0;
1796}
1797
1798int bnx2x_enable_msi(struct bnx2x *bp)
1799{
1800        int rc;
1801
1802        rc = pci_enable_msi(bp->pdev);
1803        if (rc) {
1804                BNX2X_DEV_INFO("MSI is not attainable\n");
1805                return -1;
1806        }
1807        bp->flags |= USING_MSI_FLAG;
1808
1809        return 0;
1810}
1811
1812static int bnx2x_req_irq(struct bnx2x *bp)
1813{
1814        unsigned long flags;
1815        unsigned int irq;
1816
1817        if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1818                flags = 0;
1819        else
1820                flags = IRQF_SHARED;
1821
1822        if (bp->flags & USING_MSIX_FLAG)
1823                irq = bp->msix_table[0].vector;
1824        else
1825                irq = bp->pdev->irq;
1826
1827        return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1828}
1829
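    /* Request the IRQ(s) for the mode chosen earlier by bnx2x_enable_msix() /
     * bnx2x_enable_msi(): one vector per fastpath plus the slowpath vector for
     * multi-vector MSI-X, otherwise a single IRQ (single MSI-X vector, MSI, or
     * shared legacy INTx).
     */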
1830static int bnx2x_setup_irqs(struct bnx2x *bp)
1831{
1832        int rc = 0;
1833        if (bp->flags & USING_MSIX_FLAG &&
1834            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1835                rc = bnx2x_req_msix_irqs(bp);
1836                if (rc)
1837                        return rc;
1838        } else {
1839                rc = bnx2x_req_irq(bp);
1840                if (rc) {
1841                        BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1842                        return rc;
1843                }
1844                if (bp->flags & USING_MSI_FLAG) {
1845                        bp->dev->irq = bp->pdev->irq;
1846                        netdev_info(bp->dev, "using MSI IRQ %d\n",
1847                                    bp->dev->irq);
1848                }
1849                if (bp->flags & USING_MSIX_FLAG) {
1850                        bp->dev->irq = bp->msix_table[0].vector;
1851                        netdev_info(bp->dev, "using MSIX IRQ %d\n",
1852                                    bp->dev->irq);
1853                }
1854        }
1855
1856        return 0;
1857}
1858
1859static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1860{
1861        int i;
1862
1863        for_each_rx_queue_cnic(bp, i) {
1864                napi_enable(&bnx2x_fp(bp, i, napi));
1865        }
1866}
1867
1868static void bnx2x_napi_enable(struct bnx2x *bp)
1869{
1870        int i;
1871
1872        for_each_eth_queue(bp, i) {
1873                napi_enable(&bnx2x_fp(bp, i, napi));
1874        }
1875}
1876
1877static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1878{
1879        int i;
1880
1881        for_each_rx_queue_cnic(bp, i) {
1882                napi_disable(&bnx2x_fp(bp, i, napi));
1883        }
1884}
1885
1886static void bnx2x_napi_disable(struct bnx2x *bp)
1887{
1888        int i;
1889
1890        for_each_eth_queue(bp, i) {
1891                napi_disable(&bnx2x_fp(bp, i, napi));
1892        }
1893}
1894
1895void bnx2x_netif_start(struct bnx2x *bp)
1896{
1897        if (netif_running(bp->dev)) {
1898                bnx2x_napi_enable(bp);
1899                if (CNIC_LOADED(bp))
1900                        bnx2x_napi_enable_cnic(bp);
1901                bnx2x_int_enable(bp);
1902                if (bp->state == BNX2X_STATE_OPEN)
1903                        netif_tx_wake_all_queues(bp->dev);
1904        }
1905}
1906
1907void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1908{
1909        bnx2x_int_disable_sync(bp, disable_hw);
1910        bnx2x_napi_disable(bp);
1911        if (CNIC_LOADED(bp))
1912                bnx2x_napi_disable_cnic(bp);
1913}
1914
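    /* Tx queue selection: FCoE/FIP frames are steered to the dedicated FCoE
     * ring; everything else is hashed onto one of the
     * BNX2X_NUM_ETH_QUEUES() * max_cos ETH Tx queues (e.g. 8 queues with
     * max_cos = 3 gives indices 0..23; numbers are illustrative).
     */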
1915u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1916                       struct net_device *sb_dev)
1917{
1918        struct bnx2x *bp = netdev_priv(dev);
1919
1920        if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1921                struct ethhdr *hdr = (struct ethhdr *)skb->data;
1922                u16 ether_type = ntohs(hdr->h_proto);
1923
1924                /* Skip VLAN tag if present */
1925                if (ether_type == ETH_P_8021Q) {
1926                        struct vlan_ethhdr *vhdr =
1927                                (struct vlan_ethhdr *)skb->data;
1928
1929                        ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1930                }
1931
1932                /* If ethertype is FCoE or FIP - use FCoE ring */
1933                if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1934                        return bnx2x_fcoe_tx(bp, txq_index);
1935        }
1936
1937        /* select a non-FCoE queue */
1938        return netdev_pick_tx(dev, skb, NULL) %
1939                        (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1940}
1941
1942void bnx2x_set_num_queues(struct bnx2x *bp)
1943{
1944        /* RSS queues */
1945        bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1946
1947        /* override in STORAGE SD modes */
1948        if (IS_MF_STORAGE_ONLY(bp))
1949                bp->num_ethernet_queues = 1;
1950
1951        /* Add special queues */
1952        bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1953        bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1954
1955        BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1956}
1957
1958/**
1959 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1960 *
1961 * @bp:         Driver handle
1962 * @include_cnic: handle cnic case
1963 *
1964 * We currently support at most 16 Tx queues for each CoS, so we will
1965 * allocate a multiple of 16 for the ETH L2 rings according to the value of
1966 * bp->max_cos.
1967 *
1968 * If there is an FCoE L2 queue, the appropriate Tx queue will have the next
1969 * index after all ETH L2 indices.
1970 *
1971 * If the actual number of Tx queues (for each CoS) is less than 16, there
1972 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1973 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1974 *
1975 * The proper configuration of skb->queue_mapping is handled by
1976 * bnx2x_select_queue() and __skb_tx_hash().
1977 *
1978 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1979 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1980 */
1981static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1982{
1983        int rc, tx, rx;
1984
1985        tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1986        rx = BNX2X_NUM_ETH_QUEUES(bp);
1987
1988        /* account for fcoe queue */
1989        if (include_cnic && !NO_FCOE(bp)) {
1990                rx++;
1991                tx++;
1992        }
1993
1994        rc = netif_set_real_num_tx_queues(bp->dev, tx);
1995        if (rc) {
1996                BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1997                return rc;
1998        }
1999        rc = netif_set_real_num_rx_queues(bp->dev, rx);
2000        if (rc) {
2001                BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2002                return rc;
2003        }
2004
2005        DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2006                          tx, rx);
2007
2008        return rc;
2009}
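    /* For example (illustrative numbers): with 8 ETH queues, max_cos = 3 and
     * an FCoE L2 queue (include_cnic), the function above sets
     * 8 * 3 + 1 = 25 real Tx queues and 8 + 1 = 9 real Rx queues.
     */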
2010
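    /* Compute the Rx buffer size from the MTU plus FW alignment and header
     * padding overhead. rx_frag_size is set only when the buffer (plus
     * NET_SKB_PAD) fits in a single page, which lets the buffer allocator use
     * page fragments; rx_frag_size == 0 selects the kmalloc() fallback used
     * for larger (jumbo) buffers.
     */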
2011static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2012{
2013        int i;
2014
2015        for_each_queue(bp, i) {
2016                struct bnx2x_fastpath *fp = &bp->fp[i];
2017                u32 mtu;
2018
2019                /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2020                if (IS_FCOE_IDX(i))
2021                        /*
2022                         * Although no IP frames are expected to arrive on
2023                         * this ring, we still want to add an
2024                         * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2025                         * overrun attack.
2026                         */
2027                        mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2028                else
2029                        mtu = bp->dev->mtu;
2030                fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2031                                  IP_HEADER_ALIGNMENT_PADDING +
2032                                  ETH_OVERHEAD +
2033                                  mtu +
2034                                  BNX2X_FW_RX_ALIGN_END;
2035                fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2036                /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2037                if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2038                        fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2039                else
2040                        fp->rx_frag_size = 0;
2041        }
2042}
2043
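    /* Build the default RSS indirection table: bucket i maps to client id
     * bp->fp->cl_id + (i % num_eth_queues), i.e. hash buckets are spread
     * round-robin across the ETH queues.
     */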
2044static int bnx2x_init_rss(struct bnx2x *bp)
2045{
2046        int i;
2047        u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2048
2049        /* Prepare the initial contents for the indirection table if RSS is
2050         * enabled
2051         */
2052        for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2053                bp->rss_conf_obj.ind_table[i] =
2054                        bp->fp->cl_id +
2055                        ethtool_rxfh_indir_default(i, num_eth_queues);
2056
2057        /*
2058         * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2059         * per-port, so if explicit configuration is needed, do it only
2060         * for a PMF.
2061         *
2062         * For 57712 and newer on the other hand it's a per-function
2063         * configuration.
2064         */
2065        return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2066}
2067
2068int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2069              bool config_hash, bool enable)
2070{
2071        struct bnx2x_config_rss_params params = {NULL};
2072
2073        /* Although RSS is meaningless when there is a single HW queue, we
2074         * still need it enabled in order to have the HW Rx hash generated.
2075         *
2076         * if (!is_eth_multi(bp))
2077         *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2078         */
2079
2080        params.rss_obj = rss_obj;
2081
2082        __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2083
2084        if (enable) {
2085                __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2086
2087                /* RSS configuration */
2088                __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2089                __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2090                __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2091                __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2092                if (rss_obj->udp_rss_v4)
2093                        __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2094                if (rss_obj->udp_rss_v6)
2095                        __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2096
2097                if (!CHIP_IS_E1x(bp)) {
2098                        /* valid only for TUNN_MODE_VXLAN tunnel mode */
2099                        __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2100                        __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2101
2102                        /* valid only for TUNN_MODE_GRE tunnel mode */
2103                        __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2104                }
2105        } else {
2106                __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2107        }
2108
2109        /* Hash bits */
2110        params.rss_result_mask = MULTI_MASK;
2111
2112        memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2113
2114        if (config_hash) {
2115                /* RSS keys */
2116                netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2117                __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2118        }
2119
2120        if (IS_PF(bp))
2121                return bnx2x_config_rss(bp, &params);
2122        else
2123                return bnx2x_vfpf_config_rss(bp, &params);
2124}
2125
2126static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2127{
2128        struct bnx2x_func_state_params func_params = {NULL};
2129
2130        /* Prepare parameters for function state transitions */
2131        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2132
2133        func_params.f_obj = &bp->func_obj;
2134        func_params.cmd = BNX2X_F_CMD_HW_INIT;
2135
2136        func_params.params.hw_init.load_phase = load_code;
2137
2138        return bnx2x_func_state_change(bp, &func_params);
2139}
2140
2141/*
2142 * Cleans the objects that have internal lists without sending
2143 * ramrods. Should be run with interrupts disabled.
2144 */
2145void bnx2x_squeeze_objects(struct bnx2x *bp)
2146{
2147        int rc;
2148        unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2149        struct bnx2x_mcast_ramrod_params rparam = {NULL};
2150        struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2151
2152        /***************** Cleanup MACs' object first *************************/
2153
2154        /* Wait for completion of the requested commands */
2155        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2156        /* Perform a dry cleanup */
2157        __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2158
2159        /* Clean ETH primary MAC */
2160        __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2161        rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2162                                 &ramrod_flags);
2163        if (rc != 0)
2164                BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2165
2166        /* Cleanup UC list */
2167        vlan_mac_flags = 0;
2168        __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2169        rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2170                                 &ramrod_flags);
2171        if (rc != 0)
2172                BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2173
2174        /***************** Now clean mcast object *****************************/
2175        rparam.mcast_obj = &bp->mcast_obj;
2176        __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2177
2178        /* Add a DEL command... - Since we're doing a driver cleanup only,
2179         * we take a lock surrounding both the initial send and the CONTs,
2180         * as we don't want a true completion to disrupt us in the middle.
2181         */
2182        netif_addr_lock_bh(bp->dev);
2183        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2184        if (rc < 0)
2185                BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2186                          rc);
2187
2188        /* ...and wait until all pending commands are cleared */
2189        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2190        while (rc != 0) {
2191                if (rc < 0) {
2192                        BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2193                                  rc);
2194                        netif_addr_unlock_bh(bp->dev);
2195                        return;
2196                }
2197
2198                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2199        }
2200        netif_addr_unlock_bh(bp->dev);
2201}
2202
2203#ifndef BNX2X_STOP_ON_ERROR
2204#define LOAD_ERROR_EXIT(bp, label) \
2205        do { \
2206                (bp)->state = BNX2X_STATE_ERROR; \
2207                goto label; \
2208        } while (0)
2209
2210#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2211        do { \
2212                (bp)->cnic_loaded = false; \
2213                goto label; \
2214        } while (0)
2215#else /*BNX2X_STOP_ON_ERROR*/
2216#define LOAD_ERROR_EXIT(bp, label) \
2217        do { \
2218                (bp)->state = BNX2X_STATE_ERROR; \
2219                (bp)->panic = 1; \
2220                return -EBUSY; \
2221        } while (0)
2222#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2223        do { \
2224                (bp)->cnic_loaded = false; \
2225                (bp)->panic = 1; \
2226                return -EBUSY; \
2227        } while (0)
2228#endif /*BNX2X_STOP_ON_ERROR*/
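    /* In normal builds these macros jump to the function's unwind label; in
     * BNX2X_STOP_ON_ERROR debug builds they instead set bp->panic and return
     * -EBUSY immediately, skipping the unwind so the error state is left in
     * place.
     */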
2229
2230static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2231{
2232        BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2233                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2234        return;
2235}
2236
2237static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2238{
2239        int num_groups, vf_headroom = 0;
2240        int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2241
2242        /* number of queues for statistics is number of eth queues + FCoE */
2243        u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2244
2245        /* Total number of FW statistics requests =
2246         * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2247         * and fcoe l2 queue) stats + num of queues (which includes another 1
2248         * for fcoe l2 queue if applicable)
2249         */
2250        bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2251
2252        /* vf stats appear in the request list, but their data is allocated by
2253         * the VFs themselves. We don't include them in the bp->fw_stats_num as
2254         * it is used to determine where to place the vf stats queries in the
2255         * request struct
2256         */
2257        if (IS_SRIOV(bp))
2258                vf_headroom = bnx2x_vf_headroom(bp);
2259
2260        /* Request is built from stats_query_header and an array of
2261         * stats_query_cmd_group, each of which contains
2262         * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2263         * configured in the stats_query_header.
2264         */
2265        num_groups =
2266                (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2267                 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2268                 1 : 0));
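            /* i.e. num_groups == DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
             *                                 STATS_QUERY_CMD_COUNT)
             */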
2269
2270        DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2271           bp->fw_stats_num, vf_headroom, num_groups);
2272        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2273                num_groups * sizeof(struct stats_query_cmd_group);
2274
2275        /* Data for statistics requests + stats_counter.
2276         * stats_counter holds per-STORM counters that are incremented
2277         * when the STORM has finished with the current request.
2278         * Memory for FCoE offloaded statistics is counted anyway,
2279         * even if it will not be sent.
2280         * VF stats are not accounted for here, as the VF stats data is
2281         * stored in memory allocated by the VF, not here.
2282         */
2283        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2284                sizeof(struct per_pf_stats) +
2285                sizeof(struct fcoe_statistics_params) +
2286                sizeof(struct per_queue_stats) * num_queue_stats +
2287                sizeof(struct stats_counter);
2288
2289        bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2290                                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2291        if (!bp->fw_stats)
2292                goto alloc_mem_err;
2293
2294        /* Set shortcuts */
2295        bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2296        bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2297        bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2298                ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2299        bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2300                bp->fw_stats_req_sz;
2301
2302        DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2303           U64_HI(bp->fw_stats_req_mapping),
2304           U64_LO(bp->fw_stats_req_mapping));
2305        DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2306           U64_HI(bp->fw_stats_data_mapping),
2307           U64_LO(bp->fw_stats_data_mapping));
2308        return 0;
2309
2310alloc_mem_err:
2311        bnx2x_free_fw_stats_mem(bp);
2312        BNX2X_ERR("Can't allocate FW stats memory\n");
2313        return -ENOMEM;
2314}
2315
2316/* send load request to mcp and analyze response */
2317static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2318{
2319        u32 param;
2320
2321        /* init fw_seq */
2322        bp->fw_seq =
2323                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2324                 DRV_MSG_SEQ_NUMBER_MASK);
2325        BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2326
2327        /* Get current FW pulse sequence */
2328        bp->fw_drv_pulse_wr_seq =
2329                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2330                 DRV_PULSE_SEQ_MASK);
2331        BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2332
2333        param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2334
2335        if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2336                param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2337
2338        /* load request */
2339        (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2340
2341        /* if mcp fails to respond we must abort */
2342        if (!(*load_code)) {
2343                BNX2X_ERR("MCP response failure, aborting\n");
2344                return -EBUSY;
2345        }
2346
2347        /* If mcp refused (e.g. other port is in diagnostic mode) we
2348         * must abort
2349         */
2350        if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2351                BNX2X_ERR("MCP refused load request, aborting\n");
2352                return -EBUSY;
2353        }
2354        return 0;
2355}
2356
2357/* Check whether another PF has already loaded FW to the chip. In
2358 * virtualized environments a PF from another VM may have already
2359 * initialized the device, including loading the FW.
2360 */
2361int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2362{
2363        /* is another pf loaded on this engine? */
2364        if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2365            load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2366                /* build my FW version dword */
2367                u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2368                        (BCM_5710_FW_MINOR_VERSION << 8) +
2369                        (BCM_5710_FW_REVISION_VERSION << 16) +
2370                        (BCM_5710_FW_ENGINEERING_VERSION << 24);
2371
2372                /* read loaded FW from chip */
2373                u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2374
2375                DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2376                   loaded_fw, my_fw);
2377
2378                /* abort nic load if version mismatch */
2379                if (my_fw != loaded_fw) {
2380                        if (print_err)
2381                                BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2382                                          loaded_fw, my_fw);
2383                        else
2384                                BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2385                                               loaded_fw, my_fw);
2386                        return -EBUSY;
2387                }
2388        }
2389        return 0;
2390}
2391
2392/* returns the "mcp load_code" according to global load_count array */
2393static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2394{
2395        int path = BP_PATH(bp);
2396
2397        DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2398           path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2399           bnx2x_load_count[path][2]);
2400        bnx2x_load_count[path][0]++;
2401        bnx2x_load_count[path][1 + port]++;
2402        DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2403           path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2404           bnx2x_load_count[path][2]);
2405        if (bnx2x_load_count[path][0] == 1)
2406                return FW_MSG_CODE_DRV_LOAD_COMMON;
2407        else if (bnx2x_load_count[path][1 + port] == 1)
2408                return FW_MSG_CODE_DRV_LOAD_PORT;
2409        else
2410                return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2411}
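    /* In the NO-MCP case above, bnx2x_load_count[path][0] counts every
     * function loaded on this path and [1 + port] those on each port, so the
     * first function on the path gets COMMON init, the first on each port
     * gets PORT init, and all others get FUNCTION init.
     */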
2412
2413/* mark PMF if applicable */
2414static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2415{
2416        if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2417            (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2418            (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2419                bp->port.pmf = 1;
2420                /* We need the barrier to ensure the ordering between the
2421                 * writing to bp->port.pmf here and reading it from the
2422                 * bnx2x_periodic_task().
2423                 */
2424                smp_mb();
2425        } else {
2426                bp->port.pmf = 0;
2427        }
2428
2429        DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2430}
2431
2432static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2433{
2434        if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2435             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2436            (bp->common.shmem2_base)) {
2437                if (SHMEM2_HAS(bp, dcc_support))
2438                        SHMEM2_WR(bp, dcc_support,
2439                                  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2440                                   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2441                if (SHMEM2_HAS(bp, afex_driver_support))
2442                        SHMEM2_WR(bp, afex_driver_support,
2443                                  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2444        }
2445
2446        /* Set AFEX default VLAN tag to an invalid value */
2447        bp->afex_def_vlan_tag = -1;
2448}
2449
2450/**
2451 * bnx2x_bz_fp - zero content of the fastpath structure.
2452 *
2453 * @bp:         driver handle
2454 * @index:      fastpath index to be zeroed
2455 *
2456 * Makes sure the contents of bp->fp[index].napi are kept
2457 * intact.
2458 */
2459static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2460{
2461        struct bnx2x_fastpath *fp = &bp->fp[index];
2462        int cos;
2463        struct napi_struct orig_napi = fp->napi;
2464        struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2465
2466        /* bzero bnx2x_fastpath contents */
2467        if (fp->tpa_info)
2468                memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2469                       sizeof(struct bnx2x_agg_info));
2470        memset(fp, 0, sizeof(*fp));
2471
2472        /* Restore the NAPI object as it has been already initialized */
2473        fp->napi = orig_napi;
2474        fp->tpa_info = orig_tpa_info;
2475        fp->bp = bp;
2476        fp->index = index;
2477        if (IS_ETH_FP(fp))
2478                fp->max_cos = bp->max_cos;
2479        else
2480                /* Special queues support only one CoS */
2481                fp->max_cos = 1;
2482
2483        /* Init txdata pointers */
2484        if (IS_FCOE_FP(fp))
2485                fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2486        if (IS_ETH_FP(fp))
2487                for_each_cos_in_tx_queue(fp, cos)
2488                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2489                                BNX2X_NUM_ETH_QUEUES(bp) + index];
2490
2491        /* Set the TPA mode for each queue. The TPA mode determines the
2492         * minimal queue size, so it must be set prior to queue memory allocation.
2493         */
2494        if (bp->dev->features & NETIF_F_LRO)
2495                fp->mode = TPA_MODE_LRO;
2496        else if (bp->dev->features & NETIF_F_GRO_HW)
2497                fp->mode = TPA_MODE_GRO;
2498        else
2499                fp->mode = TPA_MODE_DISABLED;
2500
2501        /* We don't want TPA if it's disabled in bp
2502         * or if this is an FCoE L2 ring.
2503         */
2504        if (bp->disable_tpa || IS_FCOE_FP(fp))
2505                fp->mode = TPA_MODE_DISABLED;
2506}
2507
2508void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2509{
2510        u32 cur;
2511
2512        if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2513                return;
2514
2515        cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2516        DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2517           cur, state);
2518
2519        SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2520}
2521
2522int bnx2x_load_cnic(struct bnx2x *bp)
2523{
2524        int i, rc, port = BP_PORT(bp);
2525
2526        DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2527
2528        mutex_init(&bp->cnic_mutex);
2529
2530        if (IS_PF(bp)) {
2531                rc = bnx2x_alloc_mem_cnic(bp);
2532                if (rc) {
2533                        BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2534                        LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2535                }
2536        }
2537
2538        rc = bnx2x_alloc_fp_mem_cnic(bp);
2539        if (rc) {
2540                BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2541                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2542        }
2543
2544        /* Update the number of queues with the cnic queues */
2545        rc = bnx2x_set_real_num_queues(bp, 1);
2546        if (rc) {
2547                BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2548                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2549        }
2550
2551        /* Add all CNIC NAPI objects */
2552        bnx2x_add_all_napi_cnic(bp);
2553        DP(NETIF_MSG_IFUP, "cnic napi added\n");
2554        bnx2x_napi_enable_cnic(bp);
2555
2556        rc = bnx2x_init_hw_func_cnic(bp);
2557        if (rc)
2558                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2559
2560        bnx2x_nic_init_cnic(bp);
2561
2562        if (IS_PF(bp)) {
2563                /* Enable Timer scan */
2564                REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2565
2566                /* setup cnic queues */
2567                for_each_cnic_queue(bp, i) {
2568                        rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2569                        if (rc) {
2570                                BNX2X_ERR("Queue setup failed\n");
2571                                LOAD_ERROR_EXIT(bp, load_error_cnic2);
2572                        }
2573                }
2574        }
2575
2576        /* Initialize Rx filter. */
2577        bnx2x_set_rx_mode_inner(bp);
2578
2579        /* re-read iscsi info */
2580        bnx2x_get_iscsi_info(bp);
2581        bnx2x_setup_cnic_irq_info(bp);
2582        bnx2x_setup_cnic_info(bp);
2583        bp->cnic_loaded = true;
2584        if (bp->state == BNX2X_STATE_OPEN)
2585                bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2586
2587        DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2588
2589        return 0;
2590
2591#ifndef BNX2X_STOP_ON_ERROR
2592load_error_cnic2:
2593        /* Disable Timer scan */
2594        REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2595
2596load_error_cnic1:
2597        bnx2x_napi_disable_cnic(bp);
2598        /* Update the number of queues without the cnic queues */
2599        if (bnx2x_set_real_num_queues(bp, 0))
2600                BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2601load_error_cnic0:
2602        BNX2X_ERR("CNIC-related load failed\n");
2603        bnx2x_free_fp_mem_cnic(bp);
2604        bnx2x_free_mem_cnic(bp);
2605        return rc;
2606#endif /* ! BNX2X_STOP_ON_ERROR */
2607}
2608
2609/* must be called with rtnl_lock */
2610int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2611{
2612        int port = BP_PORT(bp);
2613        int i, rc = 0, load_code = 0;
2614
2615        DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2616        DP(NETIF_MSG_IFUP,
2617           "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2618
2619#ifdef BNX2X_STOP_ON_ERROR
2620        if (unlikely(bp->panic)) {
2621                BNX2X_ERR("Can't load NIC when there is panic\n");
2622                return -EPERM;
2623        }
2624#endif
2625
2626        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2627
2628        /* zero the structure w/o any lock, before SP handler is initialized */
2629        memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2630        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2631                &bp->last_reported_link.link_report_flags);
2632
2633        if (IS_PF(bp))
2634                /* must be called before memory allocation and HW init */
2635                bnx2x_ilt_set_info(bp);
2636
2637        /*
2638         * Zero the fastpath structures while preserving invariants: the napi
2639         * struct (allocated only once), the fp index, max_cos and bp pointer.
2640         * Also set fp->mode and txdata_ptr.
2641         */
2642        DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2643        for_each_queue(bp, i)
2644                bnx2x_bz_fp(bp, i);
2645        memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2646                                  bp->num_cnic_queues) *
2647                                  sizeof(struct bnx2x_fp_txdata));
2648
2649        bp->fcoe_init = false;
2650
2651        /* Set the receive queues buffer size */
2652        bnx2x_set_rx_buf_size(bp);
2653
2654        if (IS_PF(bp)) {
2655                rc = bnx2x_alloc_mem(bp);
2656                if (rc) {
2657                        BNX2X_ERR("Unable to allocate bp memory\n");
2658                        return rc;
2659                }
2660        }
2661
2662        /* Needs to be done after bnx2x_alloc_mem(), since it self-adjusts to
2663         * the amount of memory available for the RSS queues.
2664         */
2665        rc = bnx2x_alloc_fp_mem(bp);
2666        if (rc) {
2667                BNX2X_ERR("Unable to allocate memory for fps\n");
2668                LOAD_ERROR_EXIT(bp, load_error0);
2669        }
2670
2671        /* Allocate memory for FW statistics */
2672        rc = bnx2x_alloc_fw_stats_mem(bp);
2673        if (rc)
2674                LOAD_ERROR_EXIT(bp, load_error0);
2675
2676        /* request pf to initialize status blocks */
2677        if (IS_VF(bp)) {
2678                rc = bnx2x_vfpf_init(bp);
2679                if (rc)
2680                        LOAD_ERROR_EXIT(bp, load_error0);
2681        }
2682
2683        /* Since bnx2x_alloc_mem() may update bp->num_queues,
2684         * bnx2x_set_real_num_queues() should always come after it.
2685         * At this stage CNIC queues are not counted.
2686         */
2687        rc = bnx2x_set_real_num_queues(bp, 0);
2688        if (rc) {
2689                BNX2X_ERR("Unable to set real_num_queues\n");
2690                LOAD_ERROR_EXIT(bp, load_error0);
2691        }
2692
2693        /* Configure multi-CoS mappings in the kernel.
2694         * This configuration may be overridden by a multi-class queue
2695         * discipline or by a DCBX negotiation result.
2696         */
2697        bnx2x_setup_tc(bp->dev, bp->max_cos);
2698
2699        /* Add all NAPI objects */
2700        bnx2x_add_all_napi(bp);
2701        DP(NETIF_MSG_IFUP, "napi added\n");
2702        bnx2x_napi_enable(bp);
2703
2704        if (IS_PF(bp)) {
2705                /* set pf load just before approaching the MCP */
2706                bnx2x_set_pf_load(bp);
2707
2708                /* if mcp exists send load request and analyze response */
2709                if (!BP_NOMCP(bp)) {
2710                        /* attempt to load pf */
2711                        rc = bnx2x_nic_load_request(bp, &load_code);
2712                        if (rc)
2713                                LOAD_ERROR_EXIT(bp, load_error1);
2714
2715                        /* what did mcp say? */
2716                        rc = bnx2x_compare_fw_ver(bp, load_code, true);
2717                        if (rc) {
2718                                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2719                                LOAD_ERROR_EXIT(bp, load_error2);
2720                        }
2721                } else {
2722                        load_code = bnx2x_nic_load_no_mcp(bp, port);
2723                }
2724
2725                /* mark pmf if applicable */
2726                bnx2x_nic_load_pmf(bp, load_code);
2727
2728                /* Init Function state controlling object */
2729                bnx2x__init_func_obj(bp);
2730
2731                /* Initialize HW */
2732                rc = bnx2x_init_hw(bp, load_code);
2733                if (rc) {
2734                        BNX2X_ERR("HW init failed, aborting\n");
2735                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2736                        LOAD_ERROR_EXIT(bp, load_error2);
2737                }
2738        }
2739
2740        bnx2x_pre_irq_nic_init(bp);
2741
2742        /* Connect to IRQs */
2743        rc = bnx2x_setup_irqs(bp);
2744        if (rc) {
2745                BNX2X_ERR("setup irqs failed\n");
2746                if (IS_PF(bp))
2747                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2748                LOAD_ERROR_EXIT(bp, load_error2);
2749        }
2750
2751        /* Init per-function objects */
2752        if (IS_PF(bp)) {
2753                /* Setup NIC internals and enable interrupts */
2754                bnx2x_post_irq_nic_init(bp, load_code);
2755
2756                bnx2x_init_bp_objs(bp);
2757                bnx2x_iov_nic_init(bp);
2758
2759                /* Set AFEX default VLAN tag to an invalid value */
2760                bp->afex_def_vlan_tag = -1;
2761                bnx2x_nic_load_afex_dcc(bp, load_code);
2762                bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2763                rc = bnx2x_func_start(bp);
2764                if (rc) {
2765                        BNX2X_ERR("Function start failed!\n");
2766                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2767
2768                        LOAD_ERROR_EXIT(bp, load_error3);
2769                }
2770
2771                /* Send LOAD_DONE command to MCP */
2772                if (!BP_NOMCP(bp)) {
2773                        load_code = bnx2x_fw_command(bp,
2774                                                     DRV_MSG_CODE_LOAD_DONE, 0);
2775                        if (!load_code) {
2776                                BNX2X_ERR("MCP response failure, aborting\n");
2777                                rc = -EBUSY;
2778                                LOAD_ERROR_EXIT(bp, load_error3);
2779                        }
2780                }
2781
2782                /* initialize FW coalescing state machines in RAM */
2783                bnx2x_update_coalesce(bp);
2784        }
2785
2786        /* setup the leading queue */
2787        rc = bnx2x_setup_leading(bp);
2788        if (rc) {
2789                BNX2X_ERR("Setup leading failed!\n");
2790                LOAD_ERROR_EXIT(bp, load_error3);
2791        }
2792
2793        /* set up the rest of the queues */
2794        for_each_nondefault_eth_queue(bp, i) {
2795                if (IS_PF(bp))
2796                        rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2797                else /* VF */
2798                        rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2799                if (rc) {
2800                        BNX2X_ERR("Queue %d setup failed\n", i);
2801                        LOAD_ERROR_EXIT(bp, load_error3);
2802                }
2803        }
2804
2805        /* setup rss */
2806        rc = bnx2x_init_rss(bp);
2807        if (rc) {
2808                BNX2X_ERR("PF RSS init failed\n");
2809                LOAD_ERROR_EXIT(bp, load_error3);
2810        }
2811
2812        /* Now when Clients are configured we are ready to work */
2813        bp->state = BNX2X_STATE_OPEN;
2814
2815        /* Configure a ucast MAC */
2816        if (IS_PF(bp))
2817                rc = bnx2x_set_eth_mac(bp, true);
2818        else /* vf */
2819                rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2820                                           true);
2821        if (rc) {
2822                BNX2X_ERR("Setting Ethernet MAC failed\n");
2823                LOAD_ERROR_EXIT(bp, load_error3);
2824        }
2825
2826        if (IS_PF(bp) && bp->pending_max) {
2827                bnx2x_update_max_mf_config(bp, bp->pending_max);
2828                bp->pending_max = 0;
2829        }
2830
2831        bp->force_link_down = false;
2832        if (bp->port.pmf) {
2833                rc = bnx2x_initial_phy_init(bp, load_mode);
2834                if (rc)
2835                        LOAD_ERROR_EXIT(bp, load_error3);
2836        }
2837        bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2838
2839        /* Start fast path */
2840
2841        /* Re-configure vlan filters */
2842        rc = bnx2x_vlan_reconfigure_vid(bp);
2843        if (rc)
2844                LOAD_ERROR_EXIT(bp, load_error3);
2845
2846        /* Initialize Rx filter. */
2847        bnx2x_set_rx_mode_inner(bp);
2848
2849        if (bp->flags & PTP_SUPPORTED) {
2850                bnx2x_register_phc(bp);
2851                bnx2x_init_ptp(bp);
2852                bnx2x_configure_ptp_filters(bp);
2853        }
2854        /* Start Tx */
2855        switch (load_mode) {
2856        case LOAD_NORMAL:
2857                /* Tx queues should only be re-enabled */
2858                netif_tx_wake_all_queues(bp->dev);
2859                break;
2860
2861        case LOAD_OPEN:
2862                netif_tx_start_all_queues(bp->dev);
2863                smp_mb__after_atomic();
2864                break;
2865
2866        case LOAD_DIAG:
2867        case LOAD_LOOPBACK_EXT:
2868                bp->state = BNX2X_STATE_DIAG;
2869                break;
2870
2871        default:
2872                break;
2873        }
2874
2875        if (bp->port.pmf)
2876                bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2877        else
2878                bnx2x__link_status_update(bp);
2879
2880        /* start the timer */
2881        mod_timer(&bp->timer, jiffies + bp->current_interval);
2882
2883        if (CNIC_ENABLED(bp))
2884                bnx2x_load_cnic(bp);
2885
2886        if (IS_PF(bp))
2887                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2888
2889        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2890                /* mark that the driver is loaded in shmem2 */
2891                u32 val;
2892                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2893                val &= ~DRV_FLAGS_MTU_MASK;
2894                val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2895                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2896                          val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2897                          DRV_FLAGS_CAPABILITIES_LOADED_L2);
2898        }
2899
2900        /* Wait for all pending SP commands to complete */
2901        if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2902                BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2903                bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2904                return -EBUSY;
2905        }
2906
2907        /* Update driver data for On-Chip MFW dump. */
2908        if (IS_PF(bp))
2909                bnx2x_update_mfw_dump(bp);
2910
2911        /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2912        if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2913                bnx2x_dcbx_init(bp, false);
2914
2915        if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2916                bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2917
2918        DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2919
2920        return 0;
2921
2922#ifndef BNX2X_STOP_ON_ERROR
2923load_error3:
2924        if (IS_PF(bp)) {
2925                bnx2x_int_disable_sync(bp, 1);
2926
2927                /* Clean queueable objects */
2928                bnx2x_squeeze_objects(bp);
2929        }
2930
2931        /* Free SKBs, SGEs, TPA pool and driver internals */
2932        bnx2x_free_skbs(bp);
2933        for_each_rx_queue(bp, i)
2934                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2935
2936        /* Release IRQs */
2937        bnx2x_free_irq(bp);
2938load_error2:
2939        if (IS_PF(bp) && !BP_NOMCP(bp)) {
2940                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2941                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2942        }
2943
2944        bp->port.pmf = 0;
2945load_error1:
2946        bnx2x_napi_disable(bp);
2947        bnx2x_del_all_napi(bp);
2948
2949        /* clear pf_load status, as it was already set */
2950        if (IS_PF(bp))
2951                bnx2x_clear_pf_load(bp);
2952load_error0:
2953        bnx2x_free_fw_stats_mem(bp);
2954        bnx2x_free_fp_mem(bp);
2955        bnx2x_free_mem(bp);
2956
2957        return rc;
2958#endif /* ! BNX2X_STOP_ON_ERROR */
2959}
2960
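/* Wait for the Tx fastpath tasks of every CoS ring on every Tx queue to
 * complete, propagating any non-zero status from bnx2x_clean_tx_queue().
 */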
2961int bnx2x_drain_tx_queues(struct bnx2x *bp)
2962{
2963        u8 rc = 0, cos, i;
2964
2965        /* Wait until tx fastpath tasks complete */
2966        for_each_tx_queue(bp, i) {
2967                struct bnx2x_fastpath *fp = &bp->fp[i];
2968
2969                for_each_cos_in_tx_queue(fp, cos)
2970                        rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2971                if (rc)
2972                        return rc;
2973        }
2974        return 0;
2975}
2976
2977/* must be called with rtnl_lock */
2978int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2979{
2980        int i;
2981        bool global = false;
2982
2983        DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2984
2985        if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2986                bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2987
2988        /* mark that the driver is unloaded in shmem2 */
2989        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2990                u32 val;
2991                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2992                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2993                          val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2994        }
2995
2996        if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2997            (bp->state == BNX2X_STATE_CLOSED ||
2998             bp->state == BNX2X_STATE_ERROR)) {
2999                /* We can get here if the driver has been unloaded
3000                 * during parity error recovery and is either waiting for a
3001                 * leader to complete or for other functions to unload and
3002                 * then ifdown has been issued. In this case we want to
3003                 * unload and let other functions to complete a recovery
3004                 * unload and let other functions complete the recovery
3005                 */
3006                bp->recovery_state = BNX2X_RECOVERY_DONE;
3007                bp->is_leader = 0;
3008                bnx2x_release_leader_lock(bp);
3009                smp_mb();
3010
3011                DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3012                BNX2X_ERR("Can't unload in closed or error state\n");
3013                return -EINVAL;
3014        }
3015
3016        /* Nothing to do during unload if the previous bnx2x_nic_load()
3017         * has not completed successfully - all resources are released.
3018         *
3019         * We can get here only after an unsuccessful ndo_* callback, during
3020         * which the dev->IFF_UP flag is still on.
3021         */
3022        if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3023                return 0;
3024
3025        /* It's important to set bp->state to a value different from
3026         * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3027         * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3028         */
3029        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3030        smp_mb();
3031
3032        /* indicate to VFs that the PF is going down */
3033        bnx2x_iov_channel_down(bp);
3034
3035        if (CNIC_LOADED(bp))
3036                bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3037
3038        /* Stop Tx */
3039        bnx2x_tx_disable(bp);
3040        netdev_reset_tc(bp->dev);
3041
3042        bp->rx_mode = BNX2X_RX_MODE_NONE;
3043
3044        del_timer_sync(&bp->timer);
3045
3046        if (IS_PF(bp) && !BP_NOMCP(bp)) {
3047                /* Set ALWAYS_ALIVE bit in shmem */
3048                bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3049                bnx2x_drv_pulse(bp);
3050                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3051                bnx2x_save_statistics(bp);
3052        }
3053
3054        /* Wait until consumers catch up with producers in all queues.
3055         * If we're recovering, the FW can't write to the host, so there is
3056         * no reason to wait for the queues to complete all Tx.
3057         */
3058        if (unload_mode != UNLOAD_RECOVERY)
3059                bnx2x_drain_tx_queues(bp);
3060
3061        /* If VF, indicate to the PF that this function is going down
3062         * (the PF will delete SP elements and clear initializations).
3063         */
3064        if (IS_VF(bp)) {
3065                bnx2x_clear_vlan_info(bp);
3066                bnx2x_vfpf_close_vf(bp);
3067        } else if (unload_mode != UNLOAD_RECOVERY) {
3068                /* if this is a normal/close unload need to clean up chip*/
3069                bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3070        } else {
3071                /* Send the UNLOAD_REQUEST to the MCP */
3072                bnx2x_send_unload_req(bp, unload_mode);
3073
3074                /* Prevent transactions to host from the functions on the
3075                 * engine that doesn't reset global blocks in case of global
3076                 * attention once global blocks are reset and gates are opened
3077                 * (the engine whose leader will perform the recovery
3078                 * last).
3079                 */
3080                if (!CHIP_IS_E1x(bp))
3081                        bnx2x_pf_disable(bp);
3082
3083                /* Disable HW interrupts, NAPI */
3084                bnx2x_netif_stop(bp, 1);
3085                /* Delete all NAPI objects */
3086                bnx2x_del_all_napi(bp);
3087                if (CNIC_LOADED(bp))
3088                        bnx2x_del_all_napi_cnic(bp);
3089                /* Release IRQs */
3090                bnx2x_free_irq(bp);
3091
3092                /* Report UNLOAD_DONE to MCP */
3093                bnx2x_send_unload_done(bp, false);
3094        }
3095
3096        /*
3097         * At this stage no more interrupts will arrive so we may safely clean
3098         * the queueable objects here in case they failed to get cleaned so far.
3099         */
3100        if (IS_PF(bp))
3101                bnx2x_squeeze_objects(bp);
3102
3103        /* There should be no more pending SP commands at this stage */
3104        bp->sp_state = 0;
3105
3106        bp->port.pmf = 0;
3107
3108        /* clear pending work in rtnl task */
3109        bp->sp_rtnl_state = 0;
3110        smp_mb();
3111
3112        /* Free SKBs, SGEs, TPA pool and driver internals */
3113        bnx2x_free_skbs(bp);
3114        if (CNIC_LOADED(bp))
3115                bnx2x_free_skbs_cnic(bp);
3116        for_each_rx_queue(bp, i)
3117                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3118
3119        bnx2x_free_fp_mem(bp);
3120        if (CNIC_LOADED(bp))
3121                bnx2x_free_fp_mem_cnic(bp);
3122
3123        if (IS_PF(bp)) {
3124                if (CNIC_LOADED(bp))
3125                        bnx2x_free_mem_cnic(bp);
3126        }
3127        bnx2x_free_mem(bp);
3128
3129        bp->state = BNX2X_STATE_CLOSED;
3130        bp->cnic_loaded = false;
3131
3132        /* Clear driver version indication in shmem */
3133        if (IS_PF(bp) && !BP_NOMCP(bp))
3134                bnx2x_update_mng_version(bp);
3135
3136        /* Check if there are pending parity attentions. If there are - set
3137         * RECOVERY_IN_PROGRESS.
3138         */
3139        if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3140                bnx2x_set_reset_in_progress(bp);
3141
3142                /* Set RESET_IS_GLOBAL if needed */
3143                if (global)
3144                        bnx2x_set_reset_global(bp);
3145        }
3146
3147        /* The last driver must disable the "close the gate" functionality
3148         * if there is no parity attention or "process kill" pending.
3149         */
3150        if (IS_PF(bp) &&
3151            !bnx2x_clear_pf_load(bp) &&
3152            bnx2x_reset_is_done(bp, BP_PATH(bp)))
3153                bnx2x_disable_close_the_gate(bp);
3154
3155        DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3156
3157        return 0;
3158}
3159
3160int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3161{
3162        u16 pmcsr;
3163
3164        /* If there is no power capability, silently succeed */
3165        if (!bp->pdev->pm_cap) {
3166                BNX2X_DEV_INFO("No power capability. Breaking.\n");
3167                return 0;
3168        }
3169
3170        pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3171
3172        switch (state) {
3173        case PCI_D0:
3174                pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3175                                      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3176                                       PCI_PM_CTRL_PME_STATUS));
3177
3178                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3179                        /* delay required during transition out of D3hot */
3180                        msleep(20);
3181                break;
3182
3183        case PCI_D3hot:
3184                /* If there are other clients above, don't
3185                 * shut down the power */
3186                if (atomic_read(&bp->pdev->enable_cnt) != 1)
3187                        return 0;
3188                /* Don't shut down the power for emulation and FPGA */
3189                if (CHIP_REV_IS_SLOW(bp))
3190                        return 0;
3191
3192                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
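                /* 3 is the D3hot encoding of the PCI_PM_CTRL state field */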
3193                pmcsr |= 3;
3194
3195                if (bp->wol)
3196                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3197
3198                pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3199                                      pmcsr);
3200
3201                /* No more memory access after this point until
3202                 * device is brought back to D0.
3203                 */
3204                break;
3205
3206        default:
3207                dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3208                return -EINVAL;
3209        }
3210        return 0;
3211}
3212
3213/*
3214 * net_device service functions
3215 */
3216static int bnx2x_poll(struct napi_struct *napi, int budget)
3217{
3218        struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3219                                                 napi);
3220        struct bnx2x *bp = fp->bp;
3221        int rx_work_done;
3222        u8 cos;
3223
3224#ifdef BNX2X_STOP_ON_ERROR
3225        if (unlikely(bp->panic)) {
3226                napi_complete(napi);
3227                return 0;
3228        }
3229#endif
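        /* Service every Tx CoS ring that has work, then poll the Rx ring */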
3230        for_each_cos_in_tx_queue(fp, cos)
3231                if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3232                        bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3233
3234        rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3235
3236        if (rx_work_done < budget) {
3237                /* No need to update SB for FCoE L2 ring as long as
3238                 * it's connected to the default SB and the SB
3239                 * has been updated when NAPI was scheduled.
3240                 */
3241                if (IS_FCOE_FP(fp)) {
3242                        napi_complete_done(napi, rx_work_done);
3243                } else {
3244                        bnx2x_update_fpsb_idx(fp);
3245                        /* bnx2x_has_rx_work() reads the status block,
3246                         * thus we need to ensure that status block indices
3247                         * have been actually read (bnx2x_update_fpsb_idx)
3248                         * prior to this check (bnx2x_has_rx_work) so that
3249                         * we won't write the "newer" value of the status block
3250                         * to IGU (if there was a DMA right after
3251                         * bnx2x_has_rx_work and if there is no rmb, the memory
3252                         * reading (bnx2x_update_fpsb_idx) may be postponed
3253                         * to right before bnx2x_ack_sb). In this case there
3254                         * will never be another interrupt until there is
3255                         * another update of the status block, while there
3256                         * is still unhandled work.
3257                         */
3258                        rmb();
3259
3260                        if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3261                                if (napi_complete_done(napi, rx_work_done)) {
3262                                        /* Re-enable interrupts */
3263                                        DP(NETIF_MSG_RX_STATUS,
3264                                           "Update index to %d\n", fp->fp_hc_idx);
3265                                        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3266                                                     le16_to_cpu(fp->fp_hc_idx),
3267                                                     IGU_INT_ENABLE, 1);
3268                                }
3269                        } else {
3270                                rx_work_done = budget;
3271                        }
3272                }
3273        }
3274
3275        return rx_work_done;
3276}
3277
3278/* We split the first BD into header and data BDs
3279 * to ease the pain of our fellow microcode engineers;
3280 * we use one mapping for both BDs.
3281 */
3282static u16 bnx2x_tx_split(struct bnx2x *bp,
3283                          struct bnx2x_fp_txdata *txdata,
3284                          struct sw_tx_bd *tx_buf,
3285                          struct eth_tx_start_bd **tx_bd, u16 hlen,
3286                          u16 bd_prod)
3287{
3288        struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3289        struct eth_tx_bd *d_tx_bd;
3290        dma_addr_t mapping;
3291        int old_len = le16_to_cpu(h_tx_bd->nbytes);
3292
3293        /* first fix first BD */
3294        h_tx_bd->nbytes = cpu_to_le16(hlen);
3295
3296        DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3297           h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3298
3299        /* now get a new data BD
3300         * (after the pbd) and fill it */
3301        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3302        d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3303
3304        mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3305                           le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3306
3307        d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3308        d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3309        d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3310
3311        /* this marks the BD as one that has no individual mapping */
3312        tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3313
3314        DP(NETIF_MSG_TX_QUEUED,
3315           "TSO split data size is %d (%x:%x)\n",
3316           d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3317
3318        /* update tx_bd */
3319        *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3320
3321        return bd_prod;
3322}
3323
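/* Byte-swap helpers used to put header fields into the __le layout that the
 * FW parsing BDs expect.
 */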
3324#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3325#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
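/* Fix up a partially computed checksum whose start is offset from the
 * transport header by 'fix' bytes: back out (fix > 0) or add in (fix < 0) the
 * sum over the gap before the folded, byte-swapped value is handed to the FW.
 */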
3326static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3327{
3328        __sum16 tsum = (__force __sum16) csum;
3329
3330        if (fix > 0)
3331                tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3332                                  csum_partial(t_header - fix, fix, 0)));
3333
3334        else if (fix < 0)
3335                tsum = ~csum_fold(csum_add((__force __wsum) csum,
3336                                  csum_partial(t_header, -fix, 0)));
3337
3338        return bswab16(tsum);
3339}
3340
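/* Classify an skb for the Tx path: return XMIT_* flags describing the
 * checksum offload, tunnel encapsulation and GSO handling it needs, based on
 * the outer and inner protocol headers.
 */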
3341static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3342{
3343        u32 rc;
3344        __u8 prot = 0;
3345        __be16 protocol;
3346
3347        if (skb->ip_summed != CHECKSUM_PARTIAL)
3348                return XMIT_PLAIN;
3349
3350        protocol = vlan_get_protocol(skb);
3351        if (protocol == htons(ETH_P_IPV6)) {
3352                rc = XMIT_CSUM_V6;
3353                prot = ipv6_hdr(skb)->nexthdr;
3354        } else {
3355                rc = XMIT_CSUM_V4;
3356                prot = ip_hdr(skb)->protocol;
3357        }
3358
3359        if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3360                if (inner_ip_hdr(skb)->version == 6) {
3361                        rc |= XMIT_CSUM_ENC_V6;
3362                        if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3363                                rc |= XMIT_CSUM_TCP;
3364                } else {
3365                        rc |= XMIT_CSUM_ENC_V4;
3366                        if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3367                                rc |= XMIT_CSUM_TCP;
3368                }
3369        }
3370        if (prot == IPPROTO_TCP)
3371                rc |= XMIT_CSUM_TCP;
3372
3373        if (skb_is_gso(skb)) {
3374                if (skb_is_gso_v6(skb)) {
3375                        rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3376                        if (rc & XMIT_CSUM_ENC)
3377                                rc |= XMIT_GSO_ENC_V6;
3378                } else {
3379                        rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3380                        if (rc & XMIT_CSUM_ENC)
3381                                rc |= XMIT_GSO_ENC_V4;
3382                }
3383        }
3384
3385        return rc;
3386}
3387
3388/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBDs and last BD) */
3389#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3390
3391/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3392#define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3393
3394#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3395/* Check if the packet requires linearization (packet is too fragmented).
3396 * No need to check fragmentation if page size > 8K (there will be no
3397 * violation of FW restrictions). */
3398static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3399                             u32 xmit_type)
3400{
3401        int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3402        int to_copy = 0, hlen = 0;
3403
3404        if (xmit_type & XMIT_GSO_ENC)
3405                num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3406
3407        if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3408                if (xmit_type & XMIT_GSO) {
3409                        unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3410                        int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3411                        /* Number of windows to check */
3412                        int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3413                        int wnd_idx = 0;
3414                        int frag_idx = 0;
3415                        u32 wnd_sum = 0;
3416
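                        /* Check that every window of wnd_size consecutive
                         * frags carries at least lso_mss bytes; a window that
                         * falls short would violate the FW BD-fetch limit and
                         * forces linearization of the skb.
                         */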
3417                        /* Headers length */
3418                        if (xmit_type & XMIT_GSO_ENC)
3419                                hlen = (int)(skb_inner_transport_header(skb) -
3420                                             skb->data) +
3421                                             inner_tcp_hdrlen(skb);
3422                        else
3423                                hlen = (int)(skb_transport_header(skb) -
3424                                             skb->data) + tcp_hdrlen(skb);
3425
3426                        /* Amount of data (w/o headers) on linear part of SKB */
3427                        first_bd_sz = skb_headlen(skb) - hlen;
3428
3429                        wnd_sum  = first_bd_sz;
3430
3431                        /* Calculate the first sum - it's special */
3432                        for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3433                                wnd_sum +=
3434                                        skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3435
3436                        /* If there was data on the linear part of the skb - check it */
3437                        if (first_bd_sz > 0) {
3438                                if (unlikely(wnd_sum < lso_mss)) {
3439                                        to_copy = 1;
3440                                        goto exit_lbl;
3441                                }
3442
3443                                wnd_sum -= first_bd_sz;
3444                        }
3445
3446                        /* Others are easier: run through the frag list and
3447                           check all windows */
3448                        for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3449                                wnd_sum +=
3450                          skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3451
3452                                if (unlikely(wnd_sum < lso_mss)) {
3453                                        to_copy = 1;
3454                                        break;
3455                                }
3456                                wnd_sum -=
3457                                        skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3458                        }
3459                } else {
3460                        /* In the non-LSO case, a too-fragmented packet
3461                         * should always be linearized */
3462                        to_copy = 1;
3463                }
3464        }
3465
3466exit_lbl:
3467        if (unlikely(to_copy))
3468                DP(NETIF_MSG_TX_QUEUED,
3469                   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3470                   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3471                   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3472
3473        return to_copy;
3474}
3475#endif
3476
3477/**
3478 * bnx2x_set_pbd_gso - update PBD in GSO case.
3479 *
3480 * @skb:        packet skb
3481 * @pbd:        parse BD
3482 * @xmit_type:  xmit flags
3483 */
3484static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3485                              struct eth_tx_parse_bd_e1x *pbd,
3486                              u32 xmit_type)
3487{
3488        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3489        pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3490        pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3491
3492        if (xmit_type & XMIT_GSO_V4) {
3493                pbd->ip_id = bswab16(ip_hdr(skb)->id);
3494                pbd->tcp_pseudo_csum =
3495                        bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3496                                                   ip_hdr(skb)->daddr,
3497                                                   0, IPPROTO_TCP, 0));
3498        } else {
3499                pbd->tcp_pseudo_csum =
3500                        bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3501                                                 &ipv6_hdr(skb)->daddr,
3502                                                 0, IPPROTO_TCP, 0));
3503        }
3504
3505        pbd->global_data |=
3506                cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3507}
3508
3509/**
3510 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3511 *
3512 * @bp:                 driver handle
3513 * @skb:                packet skb
3514 * @parsing_data:       data to be updated
3515 * @xmit_type:          xmit flags
3516 *
3517 * 57712/578xx related, when skb has encapsulation
3518 */
3519static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3520                                 u32 *parsing_data, u32 xmit_type)
3521{
3522        *parsing_data |=
3523                ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3524                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3525                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3526
3527        if (xmit_type & XMIT_CSUM_TCP) {
3528                *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3529                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3530                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3531
3532                return skb_inner_transport_header(skb) +
3533                        inner_tcp_hdrlen(skb) - skb->data;
3534        }
3535
3536        /* We support checksum offload for TCP and UDP only.
3537         * No need to pass the UDP header length - it's a constant.
3538         */
3539        return skb_inner_transport_header(skb) +
3540                sizeof(struct udphdr) - skb->data;
3541}
3542
3543/**
3544 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3545 *
3546 * @bp:                 driver handle
3547 * @skb:                packet skb
3548 * @parsing_data:       data to be updated
3549 * @xmit_type:          xmit flags
3550 *
3551 * 57712/578xx related
3552 */
3553static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3554                                u32 *parsing_data, u32 xmit_type)
3555{
3556        *parsing_data |=
3557                ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3558                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3559                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3560
3561        if (xmit_type & XMIT_CSUM_TCP) {
3562                *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3563                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3564                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3565
3566                return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3567        }
3568        /* We support checksum offload for TCP and UDP only.
3569         * No need to pass the UDP header length - it's a constant.
3570         */
3571        return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3572}
3573
3574/* set FW indication according to inner or outer protocols if tunneled */
3575static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3576                               struct eth_tx_start_bd *tx_start_bd,
3577                               u32 xmit_type)
3578{
3579        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3580
3581        if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3582                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3583
3584        if (!(xmit_type & XMIT_CSUM_TCP))
3585                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3586}
3587
3588/**
3589 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3590 *
3591 * @bp:         driver handle
3592 * @skb:        packet skb
3593 * @pbd:        parse BD to be updated
3594 * @xmit_type:  xmit flags
3595 */
3596static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3597                             struct eth_tx_parse_bd_e1x *pbd,
3598                             u32 xmit_type)
3599{
3600        u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3601
3602        /* for now NS flag is not used in Linux */
3603        pbd->global_data =
3604                cpu_to_le16(hlen |
3605                            ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3606                             ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3607
3608        pbd->ip_hlen_w = (skb_transport_header(skb) -
3609                        skb_network_header(skb)) >> 1;
3610
3611        hlen += pbd->ip_hlen_w;
3612
3613        /* We support checksum offload for TCP and UDP only */
3614        if (xmit_type & XMIT_CSUM_TCP)
3615                hlen += tcp_hdrlen(skb) / 2;
3616        else
3617                hlen += sizeof(struct udphdr) / 2;
3618
3619        pbd->total_hlen_w = cpu_to_le16(hlen);
3620        hlen = hlen*2;
3621
3622        if (xmit_type & XMIT_CSUM_TCP) {
3623                pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3624
3625        } else {
3626                s8 fix = SKB_CS_OFF(skb); /* signed! */
3627
3628                DP(NETIF_MSG_TX_QUEUED,
3629                   "hlen %d  fix %d  csum before fix %x\n",
3630                   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3631
3632                /* HW bug: fixup the CSUM */
3633                pbd->tcp_pseudo_csum =
3634                        bnx2x_csum_fix(skb_transport_header(skb),
3635                                       SKB_CS(skb), fix);
3636
3637                DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3638                   pbd->tcp_pseudo_csum);
3639        }
3640
3641        return hlen;
3642}
3643
3644static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3645                                      struct eth_tx_parse_bd_e2 *pbd_e2,
3646                                      struct eth_tx_parse_2nd_bd *pbd2,
3647                                      u16 *global_data,
3648                                      u32 xmit_type)
3649{
3650        u16 hlen_w = 0;
3651        u8 outerip_off, outerip_len = 0;
3652
3653        /* from outer IP to transport */
3654        hlen_w = (skb_inner_transport_header(skb) -
3655                  skb_network_header(skb)) >> 1;
3656
3657        /* transport len */
3658        hlen_w += inner_tcp_hdrlen(skb) >> 1;
3659
3660        pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3661
3662        /* outer IP header info */
3663        if (xmit_type & XMIT_CSUM_V4) {
3664                struct iphdr *iph = ip_hdr(skb);
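                /* Outer IP header checksum with the tot_len and frag_off
                 * fields backed out, as the fw_ip_csum_wo_len_flags_frag
                 * destination field implies.
                 */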
3665                u32 csum = (__force u32)(~iph->check) -
3666                           (__force u32)iph->tot_len -
3667                           (__force u32)iph->frag_off;
3668
3669                outerip_len = iph->ihl << 1;
3670
3671                pbd2->fw_ip_csum_wo_len_flags_frag =
3672                        bswab16(csum_fold((__force __wsum)csum));
3673        } else {
3674                pbd2->fw_ip_hdr_to_payload_w =
3675                        hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3676                pbd_e2->data.tunnel_data.flags |=
3677                        ETH_TUNNEL_DATA_IPV6_OUTER;
3678        }
3679
3680        pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3681
3682        pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3683
3684        /* inner IP header info */
3685        if (xmit_type & XMIT_CSUM_ENC_V4) {
3686                pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3687
3688                pbd_e2->data.tunnel_data.pseudo_csum =
3689                        bswab16(~csum_tcpudp_magic(
3690                                        inner_ip_hdr(skb)->saddr,
3691                                        inner_ip_hdr(skb)->daddr,
3692                                        0, IPPROTO_TCP, 0));
3693        } else {
3694                pbd_e2->data.tunnel_data.pseudo_csum =
3695                        bswab16(~csum_ipv6_magic(
3696                                        &inner_ipv6_hdr(skb)->saddr,
3697                                        &inner_ipv6_hdr(skb)->daddr,
3698                                        0, IPPROTO_TCP, 0));
3699        }
3700
3701        outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3702
3703        *global_data |=
3704                outerip_off |
3705                (outerip_len <<
3706                        ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3707                ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3708                        ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3709
3710        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3711                SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3712                pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3713        }
3714}
3715
3716static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3717                                         u32 xmit_type)
3718{
3719        struct ipv6hdr *ipv6;
3720
3721        if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3722                return;
3723
3724        if (xmit_type & XMIT_GSO_ENC_V6)
3725                ipv6 = inner_ipv6_hdr(skb);
3726        else /* XMIT_GSO_V6 */
3727                ipv6 = ipv6_hdr(skb);
3728
3729        if (ipv6->nexthdr == NEXTHDR_IPV6)
3730                *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3731}
3732
3733/* Called with netif_tx_lock held.
3734 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3735 * netif_wake_queue().
3736 */
3737netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3738{
3739        struct bnx2x *bp = netdev_priv(dev);
3740
3741        struct netdev_queue *txq;
3742        struct bnx2x_fp_txdata *txdata;
3743        struct sw_tx_bd *tx_buf;
3744        struct eth_tx_start_bd *tx_start_bd, *first_bd;
3745        struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3746        struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3747        struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3748        struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3749        u32 pbd_e2_parsing_data = 0;
3750        u16 pkt_prod, bd_prod;
3751        int nbd, txq_index;
3752        dma_addr_t mapping;
3753        u32 xmit_type = bnx2x_xmit_type(bp, skb);
3754        int i;
3755        u8 hlen = 0;
3756        __le16 pkt_size = 0;
3757        struct ethhdr *eth;
3758        u8 mac_type = UNICAST_ADDRESS;
3759
3760#ifdef BNX2X_STOP_ON_ERROR
3761        if (unlikely(bp->panic))
3762                return NETDEV_TX_BUSY;
3763#endif
3764
3765        txq_index = skb_get_queue_mapping(skb);
3766        txq = netdev_get_tx_queue(dev, txq_index);
3767
3768        BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3769
3770        txdata = &bp->bnx2x_txq[txq_index];
3771
3772        /* enable this debug print to view the transmission queue being used
3773        DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3774           txq_index, fp_index, txdata_index); */
3775
3776        /* enable this debug print to view the transmission details
3777        DP(NETIF_MSG_TX_QUEUED,
3778           "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3779           txdata->cid, fp_index, txdata_index, txdata, fp); */
3780
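        /* Make sure there is room for the worst case: one BD per fragment
         * plus the fixed per-packet BDs plus any "next page" BDs the packet
         * may span.
         */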
3781        if (unlikely(bnx2x_tx_avail(bp, txdata) <
3782                        skb_shinfo(skb)->nr_frags +
3783                        BDS_PER_TX_PKT +
3784                        NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3785                /* Handle special storage cases separately */
3786                if (txdata->tx_ring_size == 0) {
3787                        struct bnx2x_eth_q_stats *q_stats =
3788                                bnx2x_fp_qstats(bp, txdata->parent_fp);
3789                        q_stats->driver_filtered_tx_pkt++;
3790                        dev_kfree_skb(skb);
3791                        return NETDEV_TX_OK;
3792                }
3793                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3794                netif_tx_stop_queue(txq);
3795                BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3796
3797                return NETDEV_TX_BUSY;
3798        }
3799
3800        DP(NETIF_MSG_TX_QUEUED,
3801           "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3802           txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3803           ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3804           skb->len);
3805
3806        eth = (struct ethhdr *)skb->data;
3807
3808        /* set flag according to packet type (UNICAST_ADDRESS is default) */
3809        if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3810                if (is_broadcast_ether_addr(eth->h_dest))
3811                        mac_type = BROADCAST_ADDRESS;
3812                else
3813                        mac_type = MULTICAST_ADDRESS;
3814        }
3815
3816#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3817        /* First, check if we need to linearize the skb (due to FW
3818         * restrictions). No need to check fragmentation if page size > 8K
3819         * (there will be no violation of FW restrictions) */
3820        if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3821                /* Statistics of linearization */
3822                bp->lin_cnt++;
3823                if (skb_linearize(skb) != 0) {
3824                        DP(NETIF_MSG_TX_QUEUED,
3825                           "SKB linearization failed - silently dropping this SKB\n");
3826                        dev_kfree_skb_any(skb);
3827                        return NETDEV_TX_OK;
3828                }
3829        }
3830#endif
3831        /* Map skb linear data for DMA */
3832        mapping = dma_map_single(&bp->pdev->dev, skb->data,
3833                                 skb_headlen(skb), DMA_TO_DEVICE);
3834        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3835                DP(NETIF_MSG_TX_QUEUED,
3836                   "SKB mapping failed - silently dropping this SKB\n");
3837                dev_kfree_skb_any(skb);
3838                return NETDEV_TX_OK;
3839        }
3840        /*
3841         * Please read carefully. First we use one BD which we mark as start,
3842         * then we have a parsing info BD (used for TSO or xsum),
3843         * and only then we have the rest of the TSO BDs.
3844         * (Don't forget to mark the last one as last,
3845         * and to unmap only AFTER you write to the BD ...)
3846         * And above all, all PBD sizes are in words - NOT DWORDS!
3847         */
3848
3849        /* get current pkt produced now - advance it just before sending packet
3850         * since mapping of pages may fail and cause packet to be dropped
3851         */
3852        pkt_prod = txdata->tx_pkt_prod;
3853        bd_prod = TX_BD(txdata->tx_bd_prod);
3854
3855        /* get a tx_buf and first BD
3856         * tx_start_bd may be changed during SPLIT,
3857         * but first_bd will always stay first
3858         */
3859        tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3860        tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3861        first_bd = tx_start_bd;
3862
3863        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3864
3865        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3866                if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3867                        bp->eth_stats.ptp_skip_tx_ts++;
3868                        BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3869                } else if (bp->ptp_tx_skb) {
3870                        bp->eth_stats.ptp_skip_tx_ts++;
3871                        netdev_err_once(bp->dev,
3872                                        "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3873                } else {
3874                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3875                        /* schedule check for Tx timestamp */
3876                        bp->ptp_tx_skb = skb_get(skb);
3877                        bp->ptp_tx_start = jiffies;
3878                        schedule_work(&bp->ptp_task);
3879                }
3880        }
3881
3882        /* header nbd: indirectly zero other flags! */
3883        tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3884
3885        /* remember the first BD of the packet */
3886        tx_buf->first_bd = txdata->tx_bd_prod;
3887        tx_buf->skb = skb;
3888        tx_buf->flags = 0;
3889
3890        DP(NETIF_MSG_TX_QUEUED,
3891           "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3892           pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3893
3894        if (skb_vlan_tag_present(skb)) {
3895                tx_start_bd->vlan_or_ethertype =
3896                    cpu_to_le16(skb_vlan_tag_get(skb));
3897                tx_start_bd->bd_flags.as_bitfield |=
3898                    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3899        } else {
3900                /* When transmitting from a VF, the start BD must hold the
3901                 * ethertype for the FW to enforce it.
3902                 */
3903                u16 vlan_tci = 0;
3904#ifndef BNX2X_STOP_ON_ERROR
3905                if (IS_VF(bp)) {
3906#endif
3907                        /* Still need to consider the inband VLAN for enforcement */
3908                        if (__vlan_get_tag(skb, &vlan_tci)) {
3909                                tx_start_bd->vlan_or_ethertype =
3910                                        cpu_to_le16(ntohs(eth->h_proto));
3911                        } else {
3912                                tx_start_bd->bd_flags.as_bitfield |=
3913                                        (X_ETH_INBAND_VLAN <<
3914                                         ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3915                                tx_start_bd->vlan_or_ethertype =
3916                                        cpu_to_le16(vlan_tci);
3917                        }
3918#ifndef BNX2X_STOP_ON_ERROR
3919                } else {
3920                        /* used by FW for packet accounting */
3921                        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3922                }
3923#endif
3924        }
3925
3926        nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3927
3928        /* turn on parsing and get a BD */
3929        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3930
3931        if (xmit_type & XMIT_CSUM)
3932                bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3933
3934        if (!CHIP_IS_E1x(bp)) {
3935                pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3936                memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3937
3938                if (xmit_type & XMIT_CSUM_ENC) {
3939                        u16 global_data = 0;
3940
3941                        /* Set PBD in enc checksum offload case */
3942                        hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3943                                                      &pbd_e2_parsing_data,
3944                                                      xmit_type);
3945
3946                        /* turn on 2nd parsing and get a BD */
3947                        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3948
3949                        pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3950
3951                        memset(pbd2, 0, sizeof(*pbd2));
3952
3953                        pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3954                                (skb_inner_network_header(skb) -
3955                                 skb->data) >> 1;
3956
3957                        if (xmit_type & XMIT_GSO_ENC)
3958                                bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3959                                                          &global_data,
3960                                                          xmit_type);
3961
3962                        pbd2->global_data = cpu_to_le16(global_data);
3963
3964                        /* add additional parsing BD indication to the start BD */
3965                        SET_FLAG(tx_start_bd->general_data,
3966                                 ETH_TX_START_BD_PARSE_NBDS, 1);
3967                        /* set encapsulation flag in start BD */
3968                        SET_FLAG(tx_start_bd->general_data,
3969                                 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3970
3971                        tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3972
3973                        nbd++;
3974                } else if (xmit_type & XMIT_CSUM) {
3975                        /* Set PBD in checksum offload case w/o encapsulation */
3976                        hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3977                                                     &pbd_e2_parsing_data,
3978                                                     xmit_type);
3979                }
3980
3981                bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3982                /* Add the macs to the parsing BD if this is a vf or if
3983                 * Tx Switching is enabled.
3984                 */
3985                if (IS_VF(bp)) {
3986                        /* override GRE parameters in BD */
3987                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3988                                              &pbd_e2->data.mac_addr.src_mid,
3989                                              &pbd_e2->data.mac_addr.src_lo,
3990                                              eth->h_source);
3991
3992                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3993                                              &pbd_e2->data.mac_addr.dst_mid,
3994                                              &pbd_e2->data.mac_addr.dst_lo,
3995                                              eth->h_dest);
3996                } else {
3997                        if (bp->flags & TX_SWITCHING)
3998                                bnx2x_set_fw_mac_addr(
3999                                                &pbd_e2->data.mac_addr.dst_hi,
4000                                                &pbd_e2->data.mac_addr.dst_mid,
4001                                                &pbd_e2->data.mac_addr.dst_lo,
4002                                                eth->h_dest);
4003#ifdef BNX2X_STOP_ON_ERROR
4004                        /* Security enforcement is always set in Stop on Error
4005                         * builds - the source MAC should be present in the
4006                         * parsing BD */
4007                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4008                                              &pbd_e2->data.mac_addr.src_mid,
4009                                              &pbd_e2->data.mac_addr.src_lo,
4010                                              eth->h_source);
4011#endif
4012                }
4013
4014                SET_FLAG(pbd_e2_parsing_data,
4015                         ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4016        } else {
4017                u16 global_data = 0;
4018                pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4019                memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4020                /* Set PBD in checksum offload case */
4021                if (xmit_type & XMIT_CSUM)
4022                        hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4023
4024                SET_FLAG(global_data,
4025                         ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4026                pbd_e1x->global_data |= cpu_to_le16(global_data);
4027        }
4028
4029        /* Setup the data pointer of the first BD of the packet */
4030        tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4031        tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4032        tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4033        pkt_size = tx_start_bd->nbytes;
4034
4035        DP(NETIF_MSG_TX_QUEUED,
4036           "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4037           tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4038           le16_to_cpu(tx_start_bd->nbytes),
4039           tx_start_bd->bd_flags.as_bitfield,
4040           le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4041
4042        if (xmit_type & XMIT_GSO) {
4043
4044                DP(NETIF_MSG_TX_QUEUED,
4045                   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4046                   skb->len, hlen, skb_headlen(skb),
4047                   skb_shinfo(skb)->gso_size);
4048
4049                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4050
4051                if (unlikely(skb_headlen(skb) > hlen)) {
4052                        nbd++;
4053                        bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4054                                                 &tx_start_bd, hlen,
4055                                                 bd_prod);
4056                }
4057                if (!CHIP_IS_E1x(bp))
4058                        pbd_e2_parsing_data |=
4059                                (skb_shinfo(skb)->gso_size <<
4060                                 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4061                                 ETH_TX_PARSE_BD_E2_LSO_MSS;
4062                else
4063                        bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4064        }
4065
4066        /* Set the PBD's parsing_data field if not zero
4067         * (for the chips newer than 57711).
4068         */
4069        if (pbd_e2_parsing_data)
4070                pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4071
4072        tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4073
4074        /* Handle fragmented skb */
4075        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4076                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4077
4078                mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4079                                           skb_frag_size(frag), DMA_TO_DEVICE);
4080                if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4081                        unsigned int pkts_compl = 0, bytes_compl = 0;
4082
4083                        DP(NETIF_MSG_TX_QUEUED,
4084                           "Unable to map page - dropping packet...\n");
4085
4086                        /* we need to unmap all buffers already mapped
4087                         * for this SKB;
4088                         * first_bd->nbd needs to be properly updated
4089                         * before the call to bnx2x_free_tx_pkt
4090                         */
4091                        first_bd->nbd = cpu_to_le16(nbd);
4092                        bnx2x_free_tx_pkt(bp, txdata,
4093                                          TX_BD(txdata->tx_pkt_prod),
4094                                          &pkts_compl, &bytes_compl);
4095                        return NETDEV_TX_OK;
4096                }
4097
4098                bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4099                tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4100                if (total_pkt_bd == NULL)
4101                        total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4102
4103                tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4104                tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4105                tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4106                le16_add_cpu(&pkt_size, skb_frag_size(frag));
4107                nbd++;
4108
4109                DP(NETIF_MSG_TX_QUEUED,
4110                   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4111                   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4112                   le16_to_cpu(tx_data_bd->nbytes));
4113        }
4114
4115        DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4116
4117        /* update with actual num BDs */
4118        first_bd->nbd = cpu_to_le16(nbd);
4119
4120        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4121
4122        /* now send a tx doorbell, counting the next BD
4123         * if the packet contains or ends with it
4124         */
4125        if (TX_BD_POFF(bd_prod) < nbd)
4126                nbd++;
4127
4128        /* total_pkt_bytes should be set on the first data BD if
4129         * it's not an LSO packet and there is more than one
4130         * data BD. In this case pkt_size is limited by the MTU value.
4131         * However, we prefer to set it for an LSO packet as well (while we
4132         * don't have to) in order to save some CPU cycles in the non-LSO
4133         * case, where we care much more about them.
4134         */
4135        if (total_pkt_bd != NULL)
4136                total_pkt_bd->total_pkt_bytes = pkt_size;
4137
4138        if (pbd_e1x)
4139                DP(NETIF_MSG_TX_QUEUED,
4140                   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4141                   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4142                   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4143                   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4144                    le16_to_cpu(pbd_e1x->total_hlen_w));
4145        if (pbd_e2)
4146                DP(NETIF_MSG_TX_QUEUED,
4147                   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4148                   pbd_e2,
4149                   pbd_e2->data.mac_addr.dst_hi,
4150                   pbd_e2->data.mac_addr.dst_mid,
4151                   pbd_e2->data.mac_addr.dst_lo,
4152                   pbd_e2->data.mac_addr.src_hi,
4153                   pbd_e2->data.mac_addr.src_mid,
4154                   pbd_e2->data.mac_addr.src_lo,
4155                   pbd_e2->parsing_data);
4156        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4157
4158        netdev_tx_sent_queue(txq, skb->len);
4159
4160        skb_tx_timestamp(skb);
4161
4162        txdata->tx_pkt_prod++;
4163        /*
4164         * Make sure that the BD data is updated before updating the producer
4165         * since FW might read the BD right after the producer is updated.
4166         * This is only applicable for weak-ordered memory model archs such
4167         * as IA-64. The following barrier is also mandatory since the FW
4168         * assumes packets must have BDs.
4169         */
4170        wmb();
4171
4172        txdata->tx_db.data.prod += nbd;
4173        /* make sure descriptor update is observed by HW */
4174        wmb();
4175
4176        DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4177
4178        txdata->tx_bd_prod += nbd;
4179
4180        if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4181                netif_tx_stop_queue(txq);
4182
4183                /* the paired memory barrier is in bnx2x_tx_int(); we have to
4184                 * keep the ordering of the set_bit() in netif_tx_stop_queue()
4185                 * and the read of fp->bd_tx_cons */
4186                smp_mb();
4187
4188                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4189                if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4190                        netif_tx_wake_queue(txq);
4191        }
4192        txdata->tx_pkt++;
4193
4194        return NETDEV_TX_OK;
4195}
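
/* Illustrative sketch (not from the driver): a minimal standalone version of
 * the ordering pattern the transmit path above follows when posting BDs and
 * ringing the doorbell. struct example_tx_ring and its fields are
 * hypothetical; only the sequence of writes and barriers is the point.
 */
#if 0
struct example_tx_ring {
	u16 producer;		/* producer index in host memory	*/
	u16 nbd;		/* BDs just written for this packet	*/
	void __iomem *doorbell;	/* device doorbell register		*/
};

static void example_post_packet(struct example_tx_ring *ring, u32 db_raw)
{
	/* BDs for the packet have already been written to host memory */

	/* make the BD writes visible before the producer update, so the
	 * device never fetches half-written descriptors
	 */
	wmb();

	ring->producer += ring->nbd;

	/* order the producer update against the doorbell MMIO write */
	wmb();

	writel(db_raw, ring->doorbell);
}
#endif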
4196
4197void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4198{
4199        int mfw_vn = BP_FW_MB_IDX(bp);
4200        u32 tmp;
4201
4202        /* If the shmem shouldn't affect configuration, use an identity mapping */
4203        if (!IS_MF_BD(bp)) {
4204                int i;
4205
4206                for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4207                        c2s_map[i] = i;
4208                *c2s_default = 0;
4209
4210                return;
4211        }
4212
4213        tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4214        tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4215        c2s_map[0] = tmp & 0xff;
4216        c2s_map[1] = (tmp >> 8) & 0xff;
4217        c2s_map[2] = (tmp >> 16) & 0xff;
4218        c2s_map[3] = (tmp >> 24) & 0xff;
4219
4220        tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4221        tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4222        c2s_map[4] = tmp & 0xff;
4223        c2s_map[5] = (tmp >> 8) & 0xff;
4224        c2s_map[6] = (tmp >> 16) & 0xff;
4225        c2s_map[7] = (tmp >> 24) & 0xff;
4226
4227        tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4228        tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4229        *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4230}
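
/* Illustrative sketch (not from the driver): how one 32-bit shmem word is
 * unpacked into four single-byte map entries, mirroring the c2s_map[0..3]
 * extraction above; example_unpack_map() is a hypothetical helper.
 */
#if 0
static void example_unpack_map(u32 word, u8 *map)
{
	map[0] = word & 0xff;		/* bits  7:0  */
	map[1] = (word >> 8) & 0xff;	/* bits 15:8  */
	map[2] = (word >> 16) & 0xff;	/* bits 23:16 */
	map[3] = (word >> 24) & 0xff;	/* bits 31:24 */
}
#endif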
4231
4232/**
4233 * bnx2x_setup_tc - routine to configure net_device for multi tc
4234 *
4235 * @dev: net device to configure
4236 * @num_tc: number of traffic classes to enable
4237 *
4238 * callback connected to the ndo_setup_tc function pointer
4239 */
4240int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4241{
4242        struct bnx2x *bp = netdev_priv(dev);
4243        u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4244        int cos, prio, count, offset;
4245
4246        /* setup tc must be called under rtnl lock */
4247        ASSERT_RTNL();
4248
4249        /* no traffic classes requested. Aborting */
4250        if (!num_tc) {
4251                netdev_reset_tc(dev);
4252                return 0;
4253        }
4254
4255        /* requested to support too many traffic classes */
4256        if (num_tc > bp->max_cos) {
4257                BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4258                          num_tc, bp->max_cos);
4259                return -EINVAL;
4260        }
4261
4262        /* declare the number of supported traffic classes */
4263        if (netdev_set_num_tc(dev, num_tc)) {
4264                BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4265                return -EINVAL;
4266        }
4267
4268        bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4269
4270        /* configure priority to traffic class mapping */
4271        for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4272                int outer_prio = c2s_map[prio];
4273
4274                netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4275                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4276                   "mapping priority %d to tc %d\n",
4277                   outer_prio, bp->prio_to_cos[outer_prio]);
4278        }
4279
4280        /* Use this configuration to differentiate tc0 from other COSes.
4281           This can be used for ets or pfc, and saves the effort of setting
4282           up a multi-class queue disc or negotiating DCBX with a switch:
4283        netdev_set_prio_tc_map(dev, 0, 0);
4284        DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4285        for (prio = 1; prio < 16; prio++) {
4286                netdev_set_prio_tc_map(dev, prio, 1);
4287                DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4288        } */
4289
4290        /* configure traffic class to transmission queue mapping */
4291        for (cos = 0; cos < bp->max_cos; cos++) {
4292                count = BNX2X_NUM_ETH_QUEUES(bp);
4293                offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4294                netdev_set_tc_queue(dev, cos, count, offset);
4295                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4296                   "mapping tc %d to offset %d count %d\n",
4297                   cos, offset, count);
4298        }
4299
4300        return 0;
4301}
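
/* Illustrative sketch (not from the driver): with the mappings programmed
 * above, a skb priority resolves to a tx queue roughly as below. prio_to_tc
 * and rss_queue stand in for what netdev_get_prio_tc_map() and the RSS/XPS
 * selection provide; the actual selection is done by the core stack.
 */
#if 0
static int example_pick_tx_queue(const int *prio_to_tc, u8 prio,
				 int num_eth_queues, int rss_queue)
{
	int tc = prio_to_tc[prio];

	/* each traffic class owns a contiguous block of tx queues:
	 * offset = tc * num_eth_queues, count = num_eth_queues
	 */
	return tc * num_eth_queues + (rss_queue % num_eth_queues);
}
#endif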
4302
4303int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4304                     void *type_data)
4305{
4306        struct tc_mqprio_qopt *mqprio = type_data;
4307
4308        if (type != TC_SETUP_QDISC_MQPRIO)
4309                return -EOPNOTSUPP;
4310
4311        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4312
4313        return bnx2x_setup_tc(dev, mqprio->num_tc);
4314}
4315
4316/* called with rtnl_lock */
4317int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4318{
4319        struct sockaddr *addr = p;
4320        struct bnx2x *bp = netdev_priv(dev);
4321        int rc = 0;
4322
4323        if (!is_valid_ether_addr(addr->sa_data)) {
4324                BNX2X_ERR("Requested MAC address is not valid\n");
4325                return -EINVAL;
4326        }
4327
4328        if (IS_MF_STORAGE_ONLY(bp)) {
4329                BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4330                return -EINVAL;
4331        }
4332
4333        if (netif_running(dev))  {
4334                rc = bnx2x_set_eth_mac(bp, false);
4335                if (rc)
4336                        return rc;
4337        }
4338
4339        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4340
4341        if (netif_running(dev))
4342                rc = bnx2x_set_eth_mac(bp, true);
4343
4344        if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4345                SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4346
4347        return rc;
4348}
4349
4350static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4351{
4352        union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4353        struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4354        u8 cos;
4355
4356        /* Common */
4357
4358        if (IS_FCOE_IDX(fp_index)) {
4359                memset(sb, 0, sizeof(union host_hc_status_block));
4360                fp->status_blk_mapping = 0;
4361        } else {
4362                /* status blocks */
4363                if (!CHIP_IS_E1x(bp))
4364                        BNX2X_PCI_FREE(sb->e2_sb,
4365                                       bnx2x_fp(bp, fp_index,
4366                                                status_blk_mapping),
4367                                       sizeof(struct host_hc_status_block_e2));
4368                else
4369                        BNX2X_PCI_FREE(sb->e1x_sb,
4370                                       bnx2x_fp(bp, fp_index,
4371                                                status_blk_mapping),
4372                                       sizeof(struct host_hc_status_block_e1x));
4373        }
4374
4375        /* Rx */
4376        if (!skip_rx_queue(bp, fp_index)) {
4377                bnx2x_free_rx_bds(fp);
4378
4379                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4380                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4381                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4382                               bnx2x_fp(bp, fp_index, rx_desc_mapping),
4383                               sizeof(struct eth_rx_bd) * NUM_RX_BD);
4384
4385                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4386                               bnx2x_fp(bp, fp_index, rx_comp_mapping),
4387                               sizeof(struct eth_fast_path_rx_cqe) *
4388                               NUM_RCQ_BD);
4389
4390                /* SGE ring */
4391                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4392                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4393                               bnx2x_fp(bp, fp_index, rx_sge_mapping),
4394                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4395        }
4396
4397        /* Tx */
4398        if (!skip_tx_queue(bp, fp_index)) {
4399                /* fastpath tx rings: tx_buf tx_desc */
4400                for_each_cos_in_tx_queue(fp, cos) {
4401                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4402
4403                        DP(NETIF_MSG_IFDOWN,
4404                           "freeing tx memory of fp %d cos %d cid %d\n",
4405                           fp_index, cos, txdata->cid);
4406
4407                        BNX2X_FREE(txdata->tx_buf_ring);
4408                        BNX2X_PCI_FREE(txdata->tx_desc_ring,
4409                                txdata->tx_desc_mapping,
4410                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4411                }
4412        }
4413        /* end of fastpath */
4414}
4415
4416static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4417{
4418        int i;
4419        for_each_cnic_queue(bp, i)
4420                bnx2x_free_fp_mem_at(bp, i);
4421}
4422
4423void bnx2x_free_fp_mem(struct bnx2x *bp)
4424{
4425        int i;
4426        for_each_eth_queue(bp, i)
4427                bnx2x_free_fp_mem_at(bp, i);
4428}
4429
4430static void set_sb_shortcuts(struct bnx2x *bp, int index)
4431{
4432        union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4433        if (!CHIP_IS_E1x(bp)) {
4434                bnx2x_fp(bp, index, sb_index_values) =
4435                        (__le16 *)status_blk.e2_sb->sb.index_values;
4436                bnx2x_fp(bp, index, sb_running_index) =
4437                        (__le16 *)status_blk.e2_sb->sb.running_index;
4438        } else {
4439                bnx2x_fp(bp, index, sb_index_values) =
4440                        (__le16 *)status_blk.e1x_sb->sb.index_values;
4441                bnx2x_fp(bp, index, sb_running_index) =
4442                        (__le16 *)status_blk.e1x_sb->sb.running_index;
4443        }
4444}
4445
4446/* Returns the number of actually allocated BDs */
4447static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4448                              int rx_ring_size)
4449{
4450        struct bnx2x *bp = fp->bp;
4451        u16 ring_prod, cqe_ring_prod;
4452        int i, failure_cnt = 0;
4453
4454        fp->rx_comp_cons = 0;
4455        cqe_ring_prod = ring_prod = 0;
4456
4457        /* This routine is called only during init, so
4458         * fp->eth_q_stats.rx_skb_alloc_failed = 0
4459         */
4460        for (i = 0; i < rx_ring_size; i++) {
4461                if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4462                        failure_cnt++;
4463                        continue;
4464                }
4465                ring_prod = NEXT_RX_IDX(ring_prod);
4466                cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4467                WARN_ON(ring_prod <= (i - failure_cnt));
4468        }
4469
4470        if (failure_cnt)
4471                BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4472                          i - failure_cnt, fp->index);
4473
4474        fp->rx_bd_prod = ring_prod;
4475        /* Limit the CQE producer by the CQE ring size */
4476        fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4477                               cqe_ring_prod);
4478
4479        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4480
4481        return i - failure_cnt;
4482}
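
/* Illustrative sketch (not from the driver): the fill loop above advances
 * the producers only for buffers that were actually allocated, so the ring
 * stays consistent when some allocations fail. alloc_buf() and next_idx()
 * are hypothetical stand-ins for bnx2x_alloc_rx_data() and NEXT_RX_IDX().
 */
#if 0
static int example_fill_ring(int ring_size,
			     int (*alloc_buf)(u16 prod),
			     u16 (*next_idx)(u16 prod))
{
	u16 prod = 0;
	int i, failed = 0;

	for (i = 0; i < ring_size; i++) {
		if (alloc_buf(prod) < 0) {
			failed++;
			continue;	/* do not advance the producer */
		}
		prod = next_idx(prod);
	}

	return i - failed;		/* buffers actually posted */
}
#endif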
4483
4484static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4485{
4486        int i;
4487
4488        for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4489                struct eth_rx_cqe_next_page *nextpg;
4490
4491                nextpg = (struct eth_rx_cqe_next_page *)
4492                        &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4493                nextpg->addr_hi =
4494                        cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4495                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4496                nextpg->addr_lo =
4497                        cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4498                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4499        }
4500}
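
/* Illustrative sketch (not from the driver): the loop above builds a circular
 * chain of ring pages - the last CQE slot of each page holds the DMA address
 * of the following page (wrapping at the end), so the hardware moves from
 * page to page automatically. write_next_ptr() is a hypothetical callback.
 */
#if 0
static void example_chain_pages(u64 base_dma, u32 page_size, int num_pages,
				void (*write_next_ptr)(int page, u64 addr))
{
	int i;

	for (i = 0; i < num_pages; i++)
		write_next_ptr(i, base_dma +
			       page_size * ((i + 1) % num_pages));
}
#endif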
4501
4502static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4503{
4504        union host_hc_status_block *sb;
4505        struct bnx2x_fastpath *fp = &bp->fp[index];
4506        int ring_size = 0;
4507        u8 cos;
4508        int rx_ring_size = 0;
4509
4510        if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4511                rx_ring_size = MIN_RX_SIZE_NONTPA;
4512                bp->rx_ring_size = rx_ring_size;
4513        } else if (!bp->rx_ring_size) {
4514                rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4515
4516                if (CHIP_IS_E3(bp)) {
4517                        u32 cfg = SHMEM_RD(bp,
4518                                           dev_info.port_hw_config[BP_PORT(bp)].
4519                                           default_cfg);
4520
4521                        /* Decrease ring size for 1G functions */
4522                        if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4523                            PORT_HW_CFG_NET_SERDES_IF_SGMII)
4524                                rx_ring_size /= 10;
4525                }
4526
4527                /* allocate at least the number of buffers required by the FW */
4528                rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4529                                     MIN_RX_SIZE_TPA, rx_ring_size);
4530
4531                bp->rx_ring_size = rx_ring_size;
4532        } else /* if rx_ring_size specified - use it */
4533                rx_ring_size = bp->rx_ring_size;
4534
4535        DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4536
4537        /* Common */
4538        sb = &bnx2x_fp(bp, index, status_blk);
4539
4540        if (!IS_FCOE_IDX(index)) {
4541                /* status blocks */
4542                if (!CHIP_IS_E1x(bp)) {
4543                        sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4544                                                    sizeof(struct host_hc_status_block_e2));
4545                        if (!sb->e2_sb)
4546                                goto alloc_mem_err;
4547                } else {
4548                        sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4549                                                     sizeof(struct host_hc_status_block_e1x));
4550                        if (!sb->e1x_sb)
4551                                goto alloc_mem_err;
4552                }
4553        }
4554
4555        /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4556         * set shortcuts for it.
4557         */
4558        if (!IS_FCOE_IDX(index))
4559                set_sb_shortcuts(bp, index);
4560
4561        /* Tx */
4562        if (!skip_tx_queue(bp, index)) {
4563                /* fastpath tx rings: tx_buf tx_desc */
4564                for_each_cos_in_tx_queue(fp, cos) {
4565                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4566
4567                        DP(NETIF_MSG_IFUP,
4568                           "allocating tx memory of fp %d cos %d\n",
4569                           index, cos);
4570
4571                        txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4572                                                      sizeof(struct sw_tx_bd),
4573                                                      GFP_KERNEL);
4574                        if (!txdata->tx_buf_ring)
4575                                goto alloc_mem_err;
4576                        txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4577                                                               sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4578                        if (!txdata->tx_desc_ring)
4579                                goto alloc_mem_err;
4580                }
4581        }
4582
4583        /* Rx */
4584        if (!skip_rx_queue(bp, index)) {
4585                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4586                bnx2x_fp(bp, index, rx_buf_ring) =
4587                        kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4588                if (!bnx2x_fp(bp, index, rx_buf_ring))
4589                        goto alloc_mem_err;
4590                bnx2x_fp(bp, index, rx_desc_ring) =
4591                        BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4592                                        sizeof(struct eth_rx_bd) * NUM_RX_BD);
4593                if (!bnx2x_fp(bp, index, rx_desc_ring))
4594                        goto alloc_mem_err;
4595
4596                /* Seed all CQEs by 1s */
4597                bnx2x_fp(bp, index, rx_comp_ring) =
4598                        BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4599                                         sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4600                if (!bnx2x_fp(bp, index, rx_comp_ring))
4601                        goto alloc_mem_err;
4602
4603                /* SGE ring */
4604                bnx2x_fp(bp, index, rx_page_ring) =
4605                        kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4606                                GFP_KERNEL);
4607                if (!bnx2x_fp(bp, index, rx_page_ring))
4608                        goto alloc_mem_err;
4609                bnx2x_fp(bp, index, rx_sge_ring) =
4610                        BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4611                                        BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4612                if (!bnx2x_fp(bp, index, rx_sge_ring))
4613                        goto alloc_mem_err;
4614                /* RX BD ring */
4615                bnx2x_set_next_page_rx_bd(fp);
4616
4617                /* CQ ring */
4618                bnx2x_set_next_page_rx_cq(fp);
4619
4620                /* BDs */
4621                ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4622                if (ring_size < rx_ring_size)
4623                        goto alloc_mem_err;
4624        }
4625
4626        return 0;
4627
4628/* handles low memory cases */
4629alloc_mem_err:
4630        BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4631                                                index, ring_size);
4632        /* FW will drop all packets if the queue is not big enough.
4633         * In these cases we disable the queue.
4634         * Min size is different for OOO, TPA and non-TPA queues.
4635         */
4636        if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4637                                MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4638                        /* release memory allocated for this queue */
4639                        bnx2x_free_fp_mem_at(bp, index);
4640                        return -ENOMEM;
4641        }
4642        return 0;
4643}
4644
4645static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4646{
4647        if (!NO_FCOE(bp))
4648                /* FCoE */
4649                if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4650                        /* we will fail the load process instead of marking
4651                         * NO_FCOE_FLAG
4652                         */
4653                        return -ENOMEM;
4654
4655        return 0;
4656}
4657
4658static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4659{
4660        int i;
4661
4662        /* 1. Allocate FP for leading - fatal if error
4663         * 2. Allocate RSS - fix number of queues if error
4664         */
4665
4666        /* leading */
4667        if (bnx2x_alloc_fp_mem_at(bp, 0))
4668                return -ENOMEM;
4669
4670        /* RSS */
4671        for_each_nondefault_eth_queue(bp, i)
4672                if (bnx2x_alloc_fp_mem_at(bp, i))
4673                        break;
4674
4675        /* handle memory failures */
4676        if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4677                int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4678
4679                WARN_ON(delta < 0);
4680                bnx2x_shrink_eth_fp(bp, delta);
4681                if (CNIC_SUPPORT(bp))
4682                        /* move non-eth FPs next to the last eth FP;
4683                         * must be done in that order:
4684                         * FCOE_IDX < FWD_IDX < OOO_IDX
4685                         */
4686
4687                        /* move the FCoE fp even if NO_FCOE_FLAG is on */
4688                        bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4689                bp->num_ethernet_queues -= delta;
4690                bp->num_queues = bp->num_ethernet_queues +
4691                                 bp->num_cnic_queues;
4692                BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4693                          bp->num_queues + delta, bp->num_queues);
4694        }
4695
4696        return 0;
4697}
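
/* Illustrative sketch (not from the driver): when only "allocated" of the
 * requested ethernet queues got memory, the code above shrinks the ethernet
 * queue count by delta and slides the FCoE fastpath down by the same delta
 * so it stays right after the last ethernet queue. The helper below is a
 * hypothetical condensation of that bookkeeping.
 */
#if 0
static void example_shrink_queues(int *num_eth, int *fcoe_idx, int allocated)
{
	int delta = *num_eth - allocated;

	*num_eth -= delta;	/* keep only the queues that got memory */
	*fcoe_idx -= delta;	/* FCoE fp follows the last eth queue    */
}
#endif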
4698
4699void bnx2x_free_mem_bp(struct bnx2x *bp)
4700{
4701        int i;
4702
4703        for (i = 0; i < bp->fp_array_size; i++)
4704                kfree(bp->fp[i].tpa_info);
4705        kfree(bp->fp);
4706        kfree(bp->sp_objs);
4707        kfree(bp->fp_stats);
4708        kfree(bp->bnx2x_txq);
4709        kfree(bp->msix_table);
4710        kfree(bp->ilt);
4711}
4712
4713int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4714{
4715        struct bnx2x_fastpath *fp;
4716        struct msix_entry *tbl;
4717        struct bnx2x_ilt *ilt;
4718        int msix_table_size = 0;
4719        int fp_array_size, txq_array_size;
4720        int i;
4721
4722        /*
4723         * The biggest MSI-X table we might need is the maximum number of fast
4724         * path IGU SBs plus the default SB (for PF only).
4725         */
4726        msix_table_size = bp->igu_sb_cnt;
4727        if (IS_PF(bp))
4728                msix_table_size++;
4729        BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4730
4731        /* fp array: RSS plus CNIC related L2 queues */
4732        fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4733        bp->fp_array_size = fp_array_size;
4734        BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4735
4736        fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4737        if (!fp)
4738                goto alloc_err;
4739        for (i = 0; i < bp->fp_array_size; i++) {
4740                fp[i].tpa_info =
4741                        kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4742                                sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4743                if (!(fp[i].tpa_info))
4744                        goto alloc_err;
4745        }
4746
4747        bp->fp = fp;
4748
4749        /* allocate sp objs */
4750        bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4751                              GFP_KERNEL);
4752        if (!bp->sp_objs)
4753                goto alloc_err;
4754
4755        /* allocate fp_stats */
4756        bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4757                               GFP_KERNEL);
4758        if (!bp->fp_stats)
4759                goto alloc_err;
4760
4761        /* Allocate memory for the transmission queues array */
4762        txq_array_size =
4763                BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4764        BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4765
4766        bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4767                                GFP_KERNEL);
4768        if (!bp->bnx2x_txq)
4769                goto alloc_err;
4770
4771        /* msix table */
4772        tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4773        if (!tbl)
4774                goto alloc_err;
4775        bp->msix_table = tbl;
4776
4777        /* ilt */
4778        ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4779        if (!ilt)
4780                goto alloc_err;
4781        bp->ilt = ilt;
4782
4783        return 0;
4784alloc_err:
4785        bnx2x_free_mem_bp(bp);
4786        return -ENOMEM;
4787}
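
/* Illustrative sketch (not from the driver): the error handling above is the
 * usual "allocate everything, free everything on any failure" pattern. It
 * works because kfree(NULL) is a no-op, so one cleanup routine can be used
 * no matter how far the allocations got. The structure below is hypothetical
 * and assumes the context starts out zeroed, as bp does.
 */
#if 0
struct example_ctx {
	void *a;
	void *b;
};

static void example_free(struct example_ctx *ctx)
{
	kfree(ctx->b);			/* safe even if never allocated */
	kfree(ctx->a);
}

static int example_alloc(struct example_ctx *ctx)
{
	ctx->a = kzalloc(128, GFP_KERNEL);
	if (!ctx->a)
		goto alloc_err;
	ctx->b = kzalloc(256, GFP_KERNEL);
	if (!ctx->b)
		goto alloc_err;
	return 0;
alloc_err:
	example_free(ctx);
	return -ENOMEM;
}
#endif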
4788
4789int bnx2x_reload_if_running(struct net_device *dev)
4790{
4791        struct bnx2x *bp = netdev_priv(dev);
4792
4793        if (unlikely(!netif_running(dev)))
4794                return 0;
4795
4796        bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4797        return bnx2x_nic_load(bp, LOAD_NORMAL);
4798}
4799
4800int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4801{
4802        u32 sel_phy_idx = 0;
4803        if (bp->link_params.num_phys <= 1)
4804                return INT_PHY;
4805
4806        if (bp->link_vars.link_up) {
4807                sel_phy_idx = EXT_PHY1;
4808                /* In case link is SERDES, check if the EXT_PHY2 is the one */
4809                if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4810                    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4811                        sel_phy_idx = EXT_PHY2;
4812        } else {
4813
4814                switch (bnx2x_phy_selection(&bp->link_params)) {
4815                case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4816                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4817                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4818                       sel_phy_idx = EXT_PHY1;
4819                       break;
4820                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4821                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4822                       sel_phy_idx = EXT_PHY2;
4823                       break;
4824                }
4825        }
4826
4827        return sel_phy_idx;
4828}
4829int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4830{
4831        u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4832        /*
4833         * The selected active PHY index is always the one after swapping (in
4834         * case PHY swapping is enabled), so when swapping is enabled we need
4835         * to reverse the configuration.
4836         */
4837
4838        if (bp->link_params.multi_phy_config &
4839            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4840                if (sel_phy_idx == EXT_PHY1)
4841                        sel_phy_idx = EXT_PHY2;
4842                else if (sel_phy_idx == EXT_PHY2)
4843                        sel_phy_idx = EXT_PHY1;
4844        }
4845        return LINK_CONFIG_IDX(sel_phy_idx);
4846}
4847
4848#ifdef NETDEV_FCOE_WWNN
4849int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4850{
4851        struct bnx2x *bp = netdev_priv(dev);
4852        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4853
4854        switch (type) {
4855        case NETDEV_FCOE_WWNN:
4856                *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4857                                cp->fcoe_wwn_node_name_lo);
4858                break;
4859        case NETDEV_FCOE_WWPN:
4860                *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4861                                cp->fcoe_wwn_port_name_lo);
4862                break;
4863        default:
4864                BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4865                return -EINVAL;
4866        }
4867
4868        return 0;
4869}
4870#endif
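
/* Illustrative sketch (not from the driver): HILO_U64() above combines two
 * 32-bit shmem words into one 64-bit WWN, conceptually as follows.
 */
#if 0
static u64 example_hilo_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}
#endif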
4871
4872/* called with rtnl_lock */
4873int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4874{
4875        struct bnx2x *bp = netdev_priv(dev);
4876
4877        if (pci_num_vf(bp->pdev)) {
4878                DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4879                return -EPERM;
4880        }
4881
4882        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4883                BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4884                return -EAGAIN;
4885        }
4886
4887        /* This does not race with packet allocation
4888         * because the actual alloc size is
4889         * only updated as part of load
4890         */
4891        dev->mtu = new_mtu;
4892
4893        if (!bnx2x_mtu_allows_gro(new_mtu))
4894                dev->features &= ~NETIF_F_GRO_HW;
4895
4896        if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4897                SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4898
4899        return bnx2x_reload_if_running(dev);
4900}
4901
4902netdev_features_t bnx2x_fix_features(struct net_device *dev,
4903                                     netdev_features_t features)
4904{
4905        struct bnx2x *bp = netdev_priv(dev);
4906
4907        if (pci_num_vf(bp->pdev)) {
4908                netdev_features_t changed = dev->features ^ features;
4909
4910                /* Revert the requested changes in features if they
4911                 * would require internal reload of PF in bnx2x_set_features().
4912                 */
4913                if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4914                        features &= ~NETIF_F_RXCSUM;
4915                        features |= dev->features & NETIF_F_RXCSUM;
4916                }
4917
4918                if (changed & NETIF_F_LOOPBACK) {
4919                        features &= ~NETIF_F_LOOPBACK;
4920                        features |= dev->features & NETIF_F_LOOPBACK;
4921                }
4922        }
4923
4924        /* TPA requires Rx CSUM offloading */
4925        if (!(features & NETIF_F_RXCSUM))
4926                features &= ~NETIF_F_LRO;
4927
4928        if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4929                features &= ~NETIF_F_GRO_HW;
4930        if (features & NETIF_F_GRO_HW)
4931                features &= ~NETIF_F_LRO;
4932
4933        return features;
4934}
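
/* Illustrative sketch (not from the driver): the "revert a requested feature
 * bit back to its current value" idiom used above for RXCSUM and LOOPBACK
 * when VFs are active, pulled out into a hypothetical helper.
 */
#if 0
static netdev_features_t example_keep_current(netdev_features_t requested,
					      netdev_features_t current_feat,
					      netdev_features_t bit)
{
	requested &= ~bit;			/* drop the requested value */
	requested |= current_feat & bit;	/* keep the current setting */
	return requested;
}
#endif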
4935
4936int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4937{
4938        struct bnx2x *bp = netdev_priv(dev);
4939        netdev_features_t changes = features ^ dev->features;
4940        bool bnx2x_reload = false;
4941        int rc;
4942
4943        /* VFs or non-SRIOV PFs should be able to change the loopback feature */
4944        if (!pci_num_vf(bp->pdev)) {
4945                if (features & NETIF_F_LOOPBACK) {
4946                        if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4947                                bp->link_params.loopback_mode = LOOPBACK_BMAC;
4948                                bnx2x_reload = true;
4949                        }
4950                } else {
4951                        if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4952                                bp->link_params.loopback_mode = LOOPBACK_NONE;
4953                                bnx2x_reload = true;
4954                        }
4955                }
4956        }
4957
4958        /* Don't care about GRO changes */
4959        changes &= ~NETIF_F_GRO;
4960
4961        if (changes)
4962                bnx2x_reload = true;
4963
4964        if (bnx2x_reload) {
4965                if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4966                        dev->features = features;
4967                        rc = bnx2x_reload_if_running(dev);
4968                        return rc ? rc : 1;
4969                }
4970                /* else: bnx2x_nic_load() will be called at end of recovery */
4971        }
4972
4973        return 0;
4974}
4975
4976void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
4977{
4978        struct bnx2x *bp = netdev_priv(dev);
4979
4980        /* We want the dump information logged,
4981         * but calling bnx2x_panic() would kill all chances of recovery.
4982         */
4983        if (!bp->panic)
4984#ifndef BNX2X_STOP_ON_ERROR
4985                bnx2x_panic_dump(bp, false);
4986#else
4987                bnx2x_panic();
4988#endif
4989
4990        /* This allows the netif to be shut down gracefully before resetting */
4991        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4992}
4993
4994static int __maybe_unused bnx2x_suspend(struct device *dev_d)
4995{
4996        struct pci_dev *pdev = to_pci_dev(dev_d);
4997        struct net_device *dev = pci_get_drvdata(pdev);
4998        struct bnx2x *bp;
4999
5000        if (!dev) {
5001                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5002                return -ENODEV;
5003        }
5004        bp = netdev_priv(dev);
5005
5006        rtnl_lock();
5007
5008        if (!netif_running(dev)) {
5009                rtnl_unlock();
5010                return 0;
5011        }
5012
5013        netif_device_detach(dev);
5014
5015        bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5016
5017        rtnl_unlock();
5018
5019        return 0;
5020}
5021
5022static int __maybe_unused bnx2x_resume(struct device *dev_d)
5023{
5024        struct pci_dev *pdev = to_pci_dev(dev_d);
5025        struct net_device *dev = pci_get_drvdata(pdev);
5026        struct bnx2x *bp;
5027        int rc;
5028
5029        if (!dev) {
5030                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5031                return -ENODEV;
5032        }
5033        bp = netdev_priv(dev);
5034
5035        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5036                BNX2X_ERR("Handling parity error recovery. Try again later\n");
5037                return -EAGAIN;
5038        }
5039
5040        rtnl_lock();
5041
5042        if (!netif_running(dev)) {
5043                rtnl_unlock();
5044                return 0;
5045        }
5046
5047        netif_device_attach(dev);
5048
5049        rc = bnx2x_nic_load(bp, LOAD_OPEN);
5050
5051        rtnl_unlock();
5052
5053        return rc;
5054}
5055
5056SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);
5057
5058void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5059                              u32 cid)
5060{
5061        if (!cxt) {
5062                BNX2X_ERR("bad context pointer %p\n", cxt);
5063                return;
5064        }
5065
5066        /* ustorm cxt validation */
5067        cxt->ustorm_ag_context.cdu_usage =
5068                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5069                        CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5070        /* xcontext validation */
5071        cxt->xstorm_ag_context.cdu_reserved =
5072                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5073                        CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5074}
5075
5076static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5077                                    u8 fw_sb_id, u8 sb_index,
5078                                    u8 ticks)
5079{
5080        u32 addr = BAR_CSTRORM_INTMEM +
5081                   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5082        REG_WR8(bp, addr, ticks);
5083        DP(NETIF_MSG_IFUP,
5084           "port %x fw_sb_id %d sb_index %d ticks %d\n",
5085           port, fw_sb_id, sb_index, ticks);
5086}
5087
5088static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5089                                    u16 fw_sb_id, u8 sb_index,
5090                                    u8 disable)
5091{
5092        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5093        u32 addr = BAR_CSTRORM_INTMEM +
5094                   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5095        u8 flags = REG_RD8(bp, addr);
5096        /* clear and set */
5097        flags &= ~HC_INDEX_DATA_HC_ENABLED;
5098        flags |= enable_flag;
5099        REG_WR8(bp, addr, flags);
5100        DP(NETIF_MSG_IFUP,
5101           "port %x fw_sb_id %d sb_index %d disable %d\n",
5102           port, fw_sb_id, sb_index, disable);
5103}
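
/* Illustrative sketch (not from the driver): the clear-then-set
 * read-modify-write used above to toggle a single flag in the status
 * block data byte.
 */
#if 0
static u8 example_update_flag(u8 flags, u8 mask, bool enable)
{
	flags &= ~mask;		/* clear */
	if (enable)
		flags |= mask;	/* set   */
	return flags;
}
#endif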
5104
5105void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5106                                    u8 sb_index, u8 disable, u16 usec)
5107{
5108        int port = BP_PORT(bp);
5109        u8 ticks = usec / BNX2X_BTR;
5110
5111        storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5112
5113        disable = disable ? 1 : (usec ? 0 : 1);
5114        storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5115}
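
/* Illustrative sketch (not from the driver): the coalescing timeout is
 * programmed in units of BNX2X_BTR microseconds, and a requested timeout of
 * 0 usec is treated as "disable coalescing" even when the disable flag is
 * not set. Assuming BNX2X_BTR were 4, a 50 usec request becomes 12 ticks.
 */
#if 0
static u8 example_usec_to_ticks(u16 usec, u8 btr)
{
	return usec / btr;	/* e.g. 50 / 4 -> 12 ticks */
}
#endif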
5116
5117void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5118                            u32 verbose)
5119{
5120        smp_mb__before_atomic();
5121        set_bit(flag, &bp->sp_rtnl_state);
5122        smp_mb__after_atomic();
5123        DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5124           flag);
5125        schedule_delayed_work(&bp->sp_rtnl_task, 0);
5126}
5127