linux/drivers/net/ethernet/broadcom/bnxt/bnxt.c
   1/* Broadcom NetXtreme-C/E network driver.
   2 *
   3 * Copyright (c) 2014-2015 Broadcom Corporation
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation.
   8 */
   9
  10#include <linux/module.h>
  11
  12#include <linux/stringify.h>
  13#include <linux/kernel.h>
  14#include <linux/timer.h>
  15#include <linux/errno.h>
  16#include <linux/ioport.h>
  17#include <linux/slab.h>
  18#include <linux/vmalloc.h>
  19#include <linux/interrupt.h>
  20#include <linux/pci.h>
  21#include <linux/netdevice.h>
  22#include <linux/etherdevice.h>
  23#include <linux/skbuff.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/bitops.h>
  26#include <linux/io.h>
  27#include <linux/irq.h>
  28#include <linux/delay.h>
  29#include <asm/byteorder.h>
  30#include <asm/page.h>
  31#include <linux/time.h>
  32#include <linux/mii.h>
  33#include <linux/if.h>
  34#include <linux/if_vlan.h>
  35#include <net/ip.h>
  36#include <net/tcp.h>
  37#include <net/udp.h>
  38#include <net/checksum.h>
  39#include <net/ip6_checksum.h>
  40#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
  41#include <net/vxlan.h>
  42#endif
  43#ifdef CONFIG_NET_RX_BUSY_POLL
  44#include <net/busy_poll.h>
  45#endif
  46#include <linux/workqueue.h>
  47#include <linux/prefetch.h>
  48#include <linux/cache.h>
  49#include <linux/log2.h>
  50#include <linux/aer.h>
  51#include <linux/bitmap.h>
  52#include <linux/cpu_rmap.h>
  53
  54#include "bnxt_hsi.h"
  55#include "bnxt.h"
  56#include "bnxt_sriov.h"
  57#include "bnxt_ethtool.h"
  58
  59#define BNXT_TX_TIMEOUT         (5 * HZ)
  60
  61static const char version[] =
  62        "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
  63
  64MODULE_LICENSE("GPL");
  65MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
  66MODULE_VERSION(DRV_MODULE_VERSION);
  67
  68#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
  69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
  70#define BNXT_RX_COPY_THRESH 256
  71
  72#define BNXT_TX_PUSH_THRESH 164
  73
  74enum board_idx {
  75        BCM57301,
  76        BCM57302,
  77        BCM57304,
  78        BCM57402,
  79        BCM57404,
  80        BCM57406,
  81        BCM57304_VF,
  82        BCM57404_VF,
  83};
  84
  85/* indexed by enum above */
  86static const struct {
  87        char *name;
  88} board_info[] = {
  89        { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
  90        { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
  91        { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
  92        { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
  93        { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
  94        { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
  95        { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
  96        { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
  97};
  98
  99static const struct pci_device_id bnxt_pci_tbl[] = {
 100        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
 101        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
 102        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
 103        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
 104        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
 105        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
 106#ifdef CONFIG_BNXT_SRIOV
 107        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
 108        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
 109#endif
 110        { 0 }
 111};
 112
 113MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
 114
 115static const u16 bnxt_vf_req_snif[] = {
 116        HWRM_FUNC_CFG,
 117        HWRM_PORT_PHY_QCFG,
 118        HWRM_CFA_L2_FILTER_ALLOC,
 119};
 120
 121static bool bnxt_vf_pciid(enum board_idx idx)
 122{
 123        return (idx == BCM57304_VF || idx == BCM57404_VF);
 124}
 125
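/* Completion ring doorbell helpers: the REARM form writes the consumer
 * index and re-enables the ring IRQ, the plain form writes the index with
 * the IRQ left disabled (used while NAPI is polling), and the IRQ_DIS form
 * masks the IRQ without updating the index.
 */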
 126#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
 127#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
 128#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
 129
 130#define BNXT_CP_DB_REARM(db, raw_cons)                                  \
 131                writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
 132
 133#define BNXT_CP_DB(db, raw_cons)                                        \
 134                writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
 135
 136#define BNXT_CP_DB_IRQ_DIS(db)                                          \
 137                writel(DB_CP_IRQ_DIS_FLAGS, db)
 138
 139static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
 140{
 141        /* Tell compiler to fetch tx indices from memory. */
 142        barrier();
 143
 144        return bp->tx_ring_size -
 145                ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
 146}
 147
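/* TX length hint values passed to the chip, indexed by packet length in
 * 512-byte units (see "length >>= 9" in bnxt_start_xmit()); every entry
 * from 2KB upward is LHINT_2048_AND_LARGER.
 */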
 148static const u16 bnxt_lhint_arr[] = {
 149        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
 150        TX_BD_FLAGS_LHINT_512_TO_1023,
 151        TX_BD_FLAGS_LHINT_1024_TO_2047,
 152        TX_BD_FLAGS_LHINT_1024_TO_2047,
 153        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 154        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 155        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 156        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 157        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 158        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 159        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 160        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 161        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 162        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 163        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 164        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 165        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 166        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 167        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 168};
 169
 170static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 171{
 172        struct bnxt *bp = netdev_priv(dev);
 173        struct tx_bd *txbd;
 174        struct tx_bd_ext *txbd1;
 175        struct netdev_queue *txq;
 176        int i;
 177        dma_addr_t mapping;
 178        unsigned int length, pad = 0;
 179        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
 180        u16 prod, last_frag;
 181        struct pci_dev *pdev = bp->pdev;
 182        struct bnxt_tx_ring_info *txr;
 183        struct bnxt_sw_tx_bd *tx_buf;
 184
 185        i = skb_get_queue_mapping(skb);
 186        if (unlikely(i >= bp->tx_nr_rings)) {
 187                dev_kfree_skb_any(skb);
 188                return NETDEV_TX_OK;
 189        }
 190
 191        txr = &bp->tx_ring[i];
 192        txq = netdev_get_tx_queue(dev, i);
 193        prod = txr->tx_prod;
 194
 195        free_size = bnxt_tx_avail(bp, txr);
 196        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
 197                netif_tx_stop_queue(txq);
 198                return NETDEV_TX_BUSY;
 199        }
 200
 201        length = skb->len;
 202        len = skb_headlen(skb);
 203        last_frag = skb_shinfo(skb)->nr_frags;
 204
 205        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
 206
 207        txbd->tx_bd_opaque = prod;
 208
 209        tx_buf = &txr->tx_buf_ring[prod];
 210        tx_buf->skb = skb;
 211        tx_buf->nr_frags = last_frag;
 212
 213        vlan_tag_flags = 0;
 214        cfa_action = 0;
 215        if (skb_vlan_tag_present(skb)) {
 216                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
 217                                 skb_vlan_tag_get(skb);
  218                /* Currently supports 802.1Q and 802.1ad VLAN offloads;
  219                 * QinQ1, QinQ2, QinQ3 VLAN headers are deprecated.
  220                 */
 221                if (skb->vlan_proto == htons(ETH_P_8021Q))
 222                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
 223        }
 224
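        /* TX push: when the ring is empty and the packet is small enough,
         * write the BDs and the packet data straight into the doorbell BAR
         * so the NIC does not have to DMA the payload from host memory.
         */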
 225        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
 226                struct tx_push_buffer *tx_push_buf = txr->tx_push;
 227                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
 228                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
 229                void *pdata = tx_push_buf->data;
 230                u64 *end;
 231                int j, push_len;
 232
 233                /* Set COAL_NOW to be ready quickly for the next push */
 234                tx_push->tx_bd_len_flags_type =
 235                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
 236                                        TX_BD_TYPE_LONG_TX_BD |
 237                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
 238                                        TX_BD_FLAGS_COAL_NOW |
 239                                        TX_BD_FLAGS_PACKET_END |
 240                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
 241
 242                if (skb->ip_summed == CHECKSUM_PARTIAL)
 243                        tx_push1->tx_bd_hsize_lflags =
 244                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
 245                else
 246                        tx_push1->tx_bd_hsize_lflags = 0;
 247
 248                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
 249                tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
 250
 251                end = pdata + length;
 252                end = PTR_ALIGN(end, 8) - 1;
 253                *end = 0;
 254
 255                skb_copy_from_linear_data(skb, pdata, len);
 256                pdata += len;
 257                for (j = 0; j < last_frag; j++) {
 258                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
 259                        void *fptr;
 260
 261                        fptr = skb_frag_address_safe(frag);
 262                        if (!fptr)
 263                                goto normal_tx;
 264
 265                        memcpy(pdata, fptr, skb_frag_size(frag));
 266                        pdata += skb_frag_size(frag);
 267                }
 268
 269                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
 270                txbd->tx_bd_haddr = txr->data_mapping;
 271                prod = NEXT_TX(prod);
 272                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
 273                memcpy(txbd, tx_push1, sizeof(*txbd));
 274                prod = NEXT_TX(prod);
 275                tx_push->doorbell =
 276                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
 277                txr->tx_prod = prod;
 278
 279                netdev_tx_sent_queue(txq, skb->len);
 280
 281                push_len = (length + sizeof(*tx_push) + 7) / 8;
 282                if (push_len > 16) {
 283                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
 284                        __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
 285                                         push_len - 16);
 286                } else {
 287                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
 288                                         push_len);
 289                }
 290
 291                tx_buf->is_push = 1;
 292                goto tx_done;
 293        }
 294
 295normal_tx:
 296        if (length < BNXT_MIN_PKT_SIZE) {
 297                pad = BNXT_MIN_PKT_SIZE - length;
 298                if (skb_pad(skb, pad)) {
 299                        /* SKB already freed. */
 300                        tx_buf->skb = NULL;
 301                        return NETDEV_TX_OK;
 302                }
 303                length = BNXT_MIN_PKT_SIZE;
 304        }
 305
 306        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
 307
 308        if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
 309                dev_kfree_skb_any(skb);
 310                tx_buf->skb = NULL;
 311                return NETDEV_TX_OK;
 312        }
 313
 314        dma_unmap_addr_set(tx_buf, mapping, mapping);
 315        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
 316                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
 317
 318        txbd->tx_bd_haddr = cpu_to_le64(mapping);
 319
 320        prod = NEXT_TX(prod);
 321        txbd1 = (struct tx_bd_ext *)
 322                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
 323
 324        txbd1->tx_bd_hsize_lflags = 0;
 325        if (skb_is_gso(skb)) {
 326                u32 hdr_len;
 327
 328                if (skb->encapsulation)
 329                        hdr_len = skb_inner_network_offset(skb) +
 330                                skb_inner_network_header_len(skb) +
 331                                inner_tcp_hdrlen(skb);
 332                else
 333                        hdr_len = skb_transport_offset(skb) +
 334                                tcp_hdrlen(skb);
 335
 336                txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
 337                                        TX_BD_FLAGS_T_IPID |
 338                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
 339                length = skb_shinfo(skb)->gso_size;
 340                txbd1->tx_bd_mss = cpu_to_le32(length);
 341                length += hdr_len;
 342        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 343                txbd1->tx_bd_hsize_lflags =
 344                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
 345                txbd1->tx_bd_mss = 0;
 346        }
 347
 348        length >>= 9;
 349        flags |= bnxt_lhint_arr[length];
 350        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 351
 352        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
 353        txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
 354        for (i = 0; i < last_frag; i++) {
 355                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 356
 357                prod = NEXT_TX(prod);
 358                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
 359
 360                len = skb_frag_size(frag);
 361                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
 362                                           DMA_TO_DEVICE);
 363
 364                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
 365                        goto tx_dma_error;
 366
 367                tx_buf = &txr->tx_buf_ring[prod];
 368                dma_unmap_addr_set(tx_buf, mapping, mapping);
 369
 370                txbd->tx_bd_haddr = cpu_to_le64(mapping);
 371
 372                flags = len << TX_BD_LEN_SHIFT;
 373                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 374        }
 375
 376        flags &= ~TX_BD_LEN;
 377        txbd->tx_bd_len_flags_type =
 378                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
 379                            TX_BD_FLAGS_PACKET_END);
 380
 381        netdev_tx_sent_queue(txq, skb->len);
 382
 383        /* Sync BD data before updating doorbell */
 384        wmb();
 385
 386        prod = NEXT_TX(prod);
 387        txr->tx_prod = prod;
 388
 389        writel(DB_KEY_TX | prod, txr->tx_doorbell);
 390        writel(DB_KEY_TX | prod, txr->tx_doorbell);
 391
 392tx_done:
 393
 394        mmiowb();
 395
 396        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
 397                netif_tx_stop_queue(txq);
 398
 399                /* netif_tx_stop_queue() must be done before checking
 400                 * tx index in bnxt_tx_avail() below, because in
 401                 * bnxt_tx_int(), we update tx index before checking for
 402                 * netif_tx_queue_stopped().
 403                 */
 404                smp_mb();
 405                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
 406                        netif_tx_wake_queue(txq);
 407        }
 408        return NETDEV_TX_OK;
 409
 410tx_dma_error:
 411        last_frag = i;
 412
 413        /* start back at beginning and unmap skb */
 414        prod = txr->tx_prod;
 415        tx_buf = &txr->tx_buf_ring[prod];
 416        tx_buf->skb = NULL;
 417        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
 418                         skb_headlen(skb), PCI_DMA_TODEVICE);
 419        prod = NEXT_TX(prod);
 420
 421        /* unmap remaining mapped pages */
 422        for (i = 0; i < last_frag; i++) {
 423                prod = NEXT_TX(prod);
 424                tx_buf = &txr->tx_buf_ring[prod];
 425                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
 426                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
 427                               PCI_DMA_TODEVICE);
 428        }
 429
 430        dev_kfree_skb_any(skb);
 431        return NETDEV_TX_OK;
 432}
 433
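/* Reclaim nr_pkts completed TX packets: unmap their buffers, free the
 * skbs, and wake the TX queue if it was stopped and enough descriptors
 * have become available.
 */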
 434static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 435{
 436        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
 437        int index = txr - &bp->tx_ring[0];
 438        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
 439        u16 cons = txr->tx_cons;
 440        struct pci_dev *pdev = bp->pdev;
 441        int i;
 442        unsigned int tx_bytes = 0;
 443
 444        for (i = 0; i < nr_pkts; i++) {
 445                struct bnxt_sw_tx_bd *tx_buf;
 446                struct sk_buff *skb;
 447                int j, last;
 448
 449                tx_buf = &txr->tx_buf_ring[cons];
 450                cons = NEXT_TX(cons);
 451                skb = tx_buf->skb;
 452                tx_buf->skb = NULL;
 453
 454                if (tx_buf->is_push) {
 455                        tx_buf->is_push = 0;
 456                        goto next_tx_int;
 457                }
 458
 459                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
 460                                 skb_headlen(skb), PCI_DMA_TODEVICE);
 461                last = tx_buf->nr_frags;
 462
 463                for (j = 0; j < last; j++) {
 464                        cons = NEXT_TX(cons);
 465                        tx_buf = &txr->tx_buf_ring[cons];
 466                        dma_unmap_page(
 467                                &pdev->dev,
 468                                dma_unmap_addr(tx_buf, mapping),
 469                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
 470                                PCI_DMA_TODEVICE);
 471                }
 472
 473next_tx_int:
 474                cons = NEXT_TX(cons);
 475
 476                tx_bytes += skb->len;
 477                dev_kfree_skb_any(skb);
 478        }
 479
 480        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
 481        txr->tx_cons = cons;
 482
 483        /* Need to make the tx_cons update visible to bnxt_start_xmit()
 484         * before checking for netif_tx_queue_stopped().  Without the
 485         * memory barrier, there is a small possibility that bnxt_start_xmit()
 486         * will miss it and cause the queue to be stopped forever.
 487         */
 488        smp_mb();
 489
 490        if (unlikely(netif_tx_queue_stopped(txq)) &&
 491            (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
 492                __netif_tx_lock(txq, smp_processor_id());
 493                if (netif_tx_queue_stopped(txq) &&
 494                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
 495                    txr->dev_state != BNXT_DEV_STATE_CLOSING)
 496                        netif_tx_wake_queue(txq);
 497                __netif_tx_unlock(txq);
 498        }
 499}
 500
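/* Allocate and DMA-map one RX data buffer.  The buffer is attached to an
 * RX BD by bnxt_alloc_rx_data() and is later wrapped into an skb with
 * build_skb() when a packet completes.
 */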
 501static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
 502                                       gfp_t gfp)
 503{
 504        u8 *data;
 505        struct pci_dev *pdev = bp->pdev;
 506
 507        data = kmalloc(bp->rx_buf_size, gfp);
 508        if (!data)
 509                return NULL;
 510
 511        *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
 512                                  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 513
 514        if (dma_mapping_error(&pdev->dev, *mapping)) {
 515                kfree(data);
 516                data = NULL;
 517        }
 518        return data;
 519}
 520
 521static inline int bnxt_alloc_rx_data(struct bnxt *bp,
 522                                     struct bnxt_rx_ring_info *rxr,
 523                                     u16 prod, gfp_t gfp)
 524{
 525        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 526        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
 527        u8 *data;
 528        dma_addr_t mapping;
 529
 530        data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
 531        if (!data)
 532                return -ENOMEM;
 533
 534        rx_buf->data = data;
 535        dma_unmap_addr_set(rx_buf, mapping, mapping);
 536
 537        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
 538
 539        return 0;
 540}
 541
 542static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
 543                               u8 *data)
 544{
 545        u16 prod = rxr->rx_prod;
 546        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
 547        struct rx_bd *cons_bd, *prod_bd;
 548
 549        prod_rx_buf = &rxr->rx_buf_ring[prod];
 550        cons_rx_buf = &rxr->rx_buf_ring[cons];
 551
 552        prod_rx_buf->data = data;
 553
 554        dma_unmap_addr_set(prod_rx_buf, mapping,
 555                           dma_unmap_addr(cons_rx_buf, mapping));
 556
 557        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 558        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
 559
 560        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
 561}
 562
 563static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
 564{
 565        u16 next, max = rxr->rx_agg_bmap_size;
 566
 567        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
 568        if (next >= max)
 569                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
 570        return next;
 571}
 572
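/* Allocate and map an aggregation ring buffer: a full page, or a
 * BNXT_RX_PAGE_SIZE slice of a cached page when PAGE_SIZE is larger than
 * BNXT_RX_PAGE_SIZE.  These buffers hold jumbo and TPA aggregation data.
 */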
 573static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 574                                     struct bnxt_rx_ring_info *rxr,
 575                                     u16 prod, gfp_t gfp)
 576{
 577        struct rx_bd *rxbd =
 578                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 579        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
 580        struct pci_dev *pdev = bp->pdev;
 581        struct page *page;
 582        dma_addr_t mapping;
 583        u16 sw_prod = rxr->rx_sw_agg_prod;
 584        unsigned int offset = 0;
 585
 586        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
 587                page = rxr->rx_page;
 588                if (!page) {
 589                        page = alloc_page(gfp);
 590                        if (!page)
 591                                return -ENOMEM;
 592                        rxr->rx_page = page;
 593                        rxr->rx_page_offset = 0;
 594                }
 595                offset = rxr->rx_page_offset;
 596                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
 597                if (rxr->rx_page_offset == PAGE_SIZE)
 598                        rxr->rx_page = NULL;
 599                else
 600                        get_page(page);
 601        } else {
 602                page = alloc_page(gfp);
 603                if (!page)
 604                        return -ENOMEM;
 605        }
 606
 607        mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
 608                               PCI_DMA_FROMDEVICE);
 609        if (dma_mapping_error(&pdev->dev, mapping)) {
 610                __free_page(page);
 611                return -EIO;
 612        }
 613
 614        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
 615                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
 616
 617        __set_bit(sw_prod, rxr->rx_agg_bmap);
 618        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
 619        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
 620
 621        rx_agg_buf->page = page;
 622        rx_agg_buf->offset = offset;
 623        rx_agg_buf->mapping = mapping;
 624        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
 625        rxbd->rx_bd_opaque = sw_prod;
 626        return 0;
 627}
 628
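/* Error/abort path: return the aggregation buffers described by the
 * completion entries starting at cp_cons to the aggregation ring so they
 * can be reused.
 */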
 629static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
 630                                   u32 agg_bufs)
 631{
 632        struct bnxt *bp = bnapi->bp;
 633        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 634        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 635        u16 prod = rxr->rx_agg_prod;
 636        u16 sw_prod = rxr->rx_sw_agg_prod;
 637        u32 i;
 638
 639        for (i = 0; i < agg_bufs; i++) {
 640                u16 cons;
 641                struct rx_agg_cmp *agg;
 642                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
 643                struct rx_bd *prod_bd;
 644                struct page *page;
 645
 646                agg = (struct rx_agg_cmp *)
 647                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
 648                cons = agg->rx_agg_cmp_opaque;
 649                __clear_bit(cons, rxr->rx_agg_bmap);
 650
 651                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
 652                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
 653
 654                __set_bit(sw_prod, rxr->rx_agg_bmap);
 655                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
 656                cons_rx_buf = &rxr->rx_agg_ring[cons];
 657
 658                /* It is possible for sw_prod to be equal to cons, so
 659                 * set cons_rx_buf->page to NULL first.
 660                 */
 661                page = cons_rx_buf->page;
 662                cons_rx_buf->page = NULL;
 663                prod_rx_buf->page = page;
 664                prod_rx_buf->offset = cons_rx_buf->offset;
 665
 666                prod_rx_buf->mapping = cons_rx_buf->mapping;
 667
 668                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 669
 670                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
 671                prod_bd->rx_bd_opaque = sw_prod;
 672
 673                prod = NEXT_RX_AGG(prod);
 674                sw_prod = NEXT_RX_AGG(sw_prod);
 675                cp_cons = NEXT_CMP(cp_cons);
 676        }
 677        rxr->rx_agg_prod = prod;
 678        rxr->rx_sw_agg_prod = sw_prod;
 679}
 680
 681static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
 682                                   struct bnxt_rx_ring_info *rxr, u16 cons,
 683                                   u16 prod, u8 *data, dma_addr_t dma_addr,
 684                                   unsigned int len)
 685{
 686        int err;
 687        struct sk_buff *skb;
 688
 689        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
 690        if (unlikely(err)) {
 691                bnxt_reuse_rx_data(rxr, cons, data);
 692                return NULL;
 693        }
 694
 695        skb = build_skb(data, 0);
 696        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
 697                         PCI_DMA_FROMDEVICE);
 698        if (!skb) {
 699                kfree(data);
 700                return NULL;
 701        }
 702
 703        skb_reserve(skb, BNXT_RX_OFFSET);
 704        skb_put(skb, len);
 705        return skb;
 706}
 707
 708static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
 709                                     struct sk_buff *skb, u16 cp_cons,
 710                                     u32 agg_bufs)
 711{
 712        struct pci_dev *pdev = bp->pdev;
 713        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 714        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 715        u16 prod = rxr->rx_agg_prod;
 716        u32 i;
 717
 718        for (i = 0; i < agg_bufs; i++) {
 719                u16 cons, frag_len;
 720                struct rx_agg_cmp *agg;
 721                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
 722                struct page *page;
 723                dma_addr_t mapping;
 724
 725                agg = (struct rx_agg_cmp *)
 726                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
 727                cons = agg->rx_agg_cmp_opaque;
 728                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
 729                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
 730
 731                cons_rx_buf = &rxr->rx_agg_ring[cons];
 732                skb_fill_page_desc(skb, i, cons_rx_buf->page,
 733                                   cons_rx_buf->offset, frag_len);
 734                __clear_bit(cons, rxr->rx_agg_bmap);
 735
 736                /* It is possible for bnxt_alloc_rx_page() to allocate
 737                 * a sw_prod index that equals the cons index, so we
 738                 * need to clear the cons entry now.
 739                 */
 740                mapping = dma_unmap_addr(cons_rx_buf, mapping);
 741                page = cons_rx_buf->page;
 742                cons_rx_buf->page = NULL;
 743
 744                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
 745                        struct skb_shared_info *shinfo;
 746                        unsigned int nr_frags;
 747
 748                        shinfo = skb_shinfo(skb);
 749                        nr_frags = --shinfo->nr_frags;
 750                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
 751
 752                        dev_kfree_skb(skb);
 753
 754                        cons_rx_buf->page = page;
 755
 756                        /* Update prod since possibly some pages have been
 757                         * allocated already.
 758                         */
 759                        rxr->rx_agg_prod = prod;
 760                        bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
 761                        return NULL;
 762                }
 763
 764                dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
 765                               PCI_DMA_FROMDEVICE);
 766
 767                skb->data_len += frag_len;
 768                skb->len += frag_len;
 769                skb->truesize += PAGE_SIZE;
 770
 771                prod = NEXT_RX_AGG(prod);
 772                cp_cons = NEXT_CMP(cp_cons);
 773        }
 774        rxr->rx_agg_prod = prod;
 775        return skb;
 776}
 777
 778static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 779                               u8 agg_bufs, u32 *raw_cons)
 780{
 781        u16 last;
 782        struct rx_agg_cmp *agg;
 783
 784        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
 785        last = RING_CMP(*raw_cons);
 786        agg = (struct rx_agg_cmp *)
 787                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
 788        return RX_AGG_CMP_VALID(agg, *raw_cons);
 789}
 790
 791static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
 792                                            unsigned int len,
 793                                            dma_addr_t mapping)
 794{
 795        struct bnxt *bp = bnapi->bp;
 796        struct pci_dev *pdev = bp->pdev;
 797        struct sk_buff *skb;
 798
 799        skb = napi_alloc_skb(&bnapi->napi, len);
 800        if (!skb)
 801                return NULL;
 802
 803        dma_sync_single_for_cpu(&pdev->dev, mapping,
 804                                bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
 805
 806        memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
 807
 808        dma_sync_single_for_device(&pdev->dev, mapping,
 809                                   bp->rx_copy_thresh,
 810                                   PCI_DMA_FROMDEVICE);
 811
 812        skb_put(skb, len);
 813        return skb;
 814}
 815
 816static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
 817                           u32 *raw_cons, void *cmp)
 818{
 819        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 820        struct rx_cmp *rxcmp = cmp;
 821        u32 tmp_raw_cons = *raw_cons;
 822        u8 cmp_type, agg_bufs = 0;
 823
 824        cmp_type = RX_CMP_TYPE(rxcmp);
 825
 826        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
 827                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
 828                            RX_CMP_AGG_BUFS) >>
 829                           RX_CMP_AGG_BUFS_SHIFT;
 830        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
 831                struct rx_tpa_end_cmp *tpa_end = cmp;
 832
 833                agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
 834                            RX_TPA_END_CMP_AGG_BUFS) >>
 835                           RX_TPA_END_CMP_AGG_BUFS_SHIFT;
 836        }
 837
 838        if (agg_bufs) {
 839                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
 840                        return -EBUSY;
 841        }
 842        *raw_cons = tmp_raw_cons;
 843        return 0;
 844}
 845
 846static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 847{
 848        if (!rxr->bnapi->in_reset) {
 849                rxr->bnapi->in_reset = true;
 850                set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
 851                schedule_work(&bp->sp_task);
 852        }
 853        rxr->rx_next_cons = 0xffff;
 854}
 855
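/* Handle a TPA_START completion: the chip has started aggregating a TCP
 * flow into the buffer at cons.  Park that buffer in rx_tpa[agg_id] until
 * the matching TPA_END arrives, and put the agg_id's previous buffer back
 * on the RX ring in its place.
 */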
 856static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 857                           struct rx_tpa_start_cmp *tpa_start,
 858                           struct rx_tpa_start_cmp_ext *tpa_start1)
 859{
 860        u8 agg_id = TPA_START_AGG_ID(tpa_start);
 861        u16 cons, prod;
 862        struct bnxt_tpa_info *tpa_info;
 863        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
 864        struct rx_bd *prod_bd;
 865        dma_addr_t mapping;
 866
 867        cons = tpa_start->rx_tpa_start_cmp_opaque;
 868        prod = rxr->rx_prod;
 869        cons_rx_buf = &rxr->rx_buf_ring[cons];
 870        prod_rx_buf = &rxr->rx_buf_ring[prod];
 871        tpa_info = &rxr->rx_tpa[agg_id];
 872
 873        if (unlikely(cons != rxr->rx_next_cons)) {
 874                bnxt_sched_reset(bp, rxr);
 875                return;
 876        }
 877
 878        prod_rx_buf->data = tpa_info->data;
 879
 880        mapping = tpa_info->mapping;
 881        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 882
 883        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 884
 885        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
 886
 887        tpa_info->data = cons_rx_buf->data;
 888        cons_rx_buf->data = NULL;
 889        tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
 890
 891        tpa_info->len =
 892                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
 893                                RX_TPA_START_CMP_LEN_SHIFT;
 894        if (likely(TPA_START_HASH_VALID(tpa_start))) {
 895                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
 896
 897                tpa_info->hash_type = PKT_HASH_TYPE_L4;
 898                tpa_info->gso_type = SKB_GSO_TCPV4;
 899                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
 900                if (hash_type == 3)
 901                        tpa_info->gso_type = SKB_GSO_TCPV6;
 902                tpa_info->rss_hash =
 903                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
 904        } else {
 905                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
 906                tpa_info->gso_type = 0;
 907                if (netif_msg_rx_err(bp))
 908                        netdev_warn(bp->dev, "TPA packet without valid hash\n");
 909        }
 910        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
 911        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
 912
 913        rxr->rx_prod = NEXT_RX(prod);
 914        cons = NEXT_RX(cons);
 915        rxr->rx_next_cons = NEXT_RX(cons);
 916        cons_rx_buf = &rxr->rx_buf_ring[cons];
 917
 918        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
 919        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
 920        cons_rx_buf->data = NULL;
 921}
 922
 923static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
 924                           u16 cp_cons, u32 agg_bufs)
 925{
 926        if (agg_bufs)
 927                bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
 928}
 929
 930#define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
 931#define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
 932
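/* Prepare an aggregated packet for GRO: rebuild the TCP/IP header layout
 * from the payload offset reported in the TPA_END completion, fill in the
 * GRO segment count and size, and mark UDP tunnel encapsulation when the
 * inner headers are offset from the start of the packet.
 */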
 933static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
 934                                           struct rx_tpa_end_cmp *tpa_end,
 935                                           struct rx_tpa_end_cmp_ext *tpa_end1,
 936                                           struct sk_buff *skb)
 937{
 938#ifdef CONFIG_INET
 939        struct tcphdr *th;
 940        int payload_off, tcp_opt_len = 0;
 941        int len, nw_off;
 942        u16 segs;
 943
 944        segs = TPA_END_TPA_SEGS(tpa_end);
 945        if (segs == 1)
 946                return skb;
 947
 948        NAPI_GRO_CB(skb)->count = segs;
 949        skb_shinfo(skb)->gso_size =
 950                le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
 951        skb_shinfo(skb)->gso_type = tpa_info->gso_type;
 952        payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
 953                       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
 954                      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
 955        if (TPA_END_GRO_TS(tpa_end))
 956                tcp_opt_len = 12;
 957
 958        if (tpa_info->gso_type == SKB_GSO_TCPV4) {
 959                struct iphdr *iph;
 960
 961                nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
 962                         ETH_HLEN;
 963                skb_set_network_header(skb, nw_off);
 964                iph = ip_hdr(skb);
 965                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
 966                len = skb->len - skb_transport_offset(skb);
 967                th = tcp_hdr(skb);
 968                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
 969        } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
 970                struct ipv6hdr *iph;
 971
 972                nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
 973                         ETH_HLEN;
 974                skb_set_network_header(skb, nw_off);
 975                iph = ipv6_hdr(skb);
 976                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
 977                len = skb->len - skb_transport_offset(skb);
 978                th = tcp_hdr(skb);
 979                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
 980        } else {
 981                dev_kfree_skb_any(skb);
 982                return NULL;
 983        }
 984        tcp_gro_complete(skb);
 985
 986        if (nw_off) { /* tunnel */
 987                struct udphdr *uh = NULL;
 988
 989                if (skb->protocol == htons(ETH_P_IP)) {
 990                        struct iphdr *iph = (struct iphdr *)skb->data;
 991
 992                        if (iph->protocol == IPPROTO_UDP)
 993                                uh = (struct udphdr *)(iph + 1);
 994                } else {
 995                        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
 996
 997                        if (iph->nexthdr == IPPROTO_UDP)
 998                                uh = (struct udphdr *)(iph + 1);
 999                }
1000                if (uh) {
1001                        if (uh->check)
1002                                skb_shinfo(skb)->gso_type |=
1003                                        SKB_GSO_UDP_TUNNEL_CSUM;
1004                        else
1005                                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1006                }
1007        }
1008#endif
1009        return skb;
1010}
1011
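/* Handle a TPA_END completion: build a single large skb from the buffer
 * parked at TPA_START plus any aggregation buffers, apply RSS hash, VLAN
 * and checksum results, and hand it to bnxt_gro_skb() if GRO completion
 * was requested.
 */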
1012static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1013                                           struct bnxt_napi *bnapi,
1014                                           u32 *raw_cons,
1015                                           struct rx_tpa_end_cmp *tpa_end,
1016                                           struct rx_tpa_end_cmp_ext *tpa_end1,
1017                                           bool *agg_event)
1018{
1019        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1020        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1021        u8 agg_id = TPA_END_AGG_ID(tpa_end);
1022        u8 *data, agg_bufs;
1023        u16 cp_cons = RING_CMP(*raw_cons);
1024        unsigned int len;
1025        struct bnxt_tpa_info *tpa_info;
1026        dma_addr_t mapping;
1027        struct sk_buff *skb;
1028
1029        if (unlikely(bnapi->in_reset)) {
1030                int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
1031
1032                if (rc < 0)
1033                        return ERR_PTR(-EBUSY);
1034                return NULL;
1035        }
1036
1037        tpa_info = &rxr->rx_tpa[agg_id];
1038        data = tpa_info->data;
1039        prefetch(data);
1040        len = tpa_info->len;
1041        mapping = tpa_info->mapping;
1042
1043        agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1044                    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1045
1046        if (agg_bufs) {
1047                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1048                        return ERR_PTR(-EBUSY);
1049
1050                *agg_event = true;
1051                cp_cons = NEXT_CMP(cp_cons);
1052        }
1053
1054        if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
1055                bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1056                netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1057                            agg_bufs, (int)MAX_SKB_FRAGS);
1058                return NULL;
1059        }
1060
1061        if (len <= bp->rx_copy_thresh) {
1062                skb = bnxt_copy_skb(bnapi, data, len, mapping);
1063                if (!skb) {
1064                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1065                        return NULL;
1066                }
1067        } else {
1068                u8 *new_data;
1069                dma_addr_t new_mapping;
1070
1071                new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1072                if (!new_data) {
1073                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1074                        return NULL;
1075                }
1076
1077                tpa_info->data = new_data;
1078                tpa_info->mapping = new_mapping;
1079
1080                skb = build_skb(data, 0);
1081                dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
1082                                 PCI_DMA_FROMDEVICE);
1083
1084                if (!skb) {
1085                        kfree(data);
1086                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1087                        return NULL;
1088                }
1089                skb_reserve(skb, BNXT_RX_OFFSET);
1090                skb_put(skb, len);
1091        }
1092
1093        if (agg_bufs) {
1094                skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1095                if (!skb) {
1096                        /* Page reuse already handled by bnxt_rx_pages(). */
1097                        return NULL;
1098                }
1099        }
1100        skb->protocol = eth_type_trans(skb, bp->dev);
1101
1102        if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1103                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1104
1105        if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1106                netdev_features_t features = skb->dev->features;
1107                u16 vlan_proto = tpa_info->metadata >>
1108                        RX_CMP_FLAGS2_METADATA_TPID_SFT;
1109
1110                if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1111                     vlan_proto == ETH_P_8021Q) ||
1112                    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
1113                     vlan_proto == ETH_P_8021AD)) {
1114                        __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
1115                                               tpa_info->metadata &
1116                                               RX_CMP_FLAGS2_METADATA_VID_MASK);
1117                }
1118        }
1119
1120        skb_checksum_none_assert(skb);
1121        if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1122                skb->ip_summed = CHECKSUM_UNNECESSARY;
1123                skb->csum_level =
1124                        (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1125        }
1126
1127        if (TPA_END_GRO(tpa_end))
1128                skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
1129
1130        return skb;
1131}
1132
1133/* returns the following:
1134 * 1       - 1 packet successfully received
1135 * 0       - successful TPA_START, packet not completed yet
1136 * -EBUSY  - completion ring does not have all the agg buffers yet
1137 * -ENOMEM - packet aborted due to out of memory
1138 * -EIO    - packet aborted due to hw error indicated in BD
1139 */
1140static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1141                       bool *agg_event)
1142{
1143        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1144        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1145        struct net_device *dev = bp->dev;
1146        struct rx_cmp *rxcmp;
1147        struct rx_cmp_ext *rxcmp1;
1148        u32 tmp_raw_cons = *raw_cons;
1149        u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1150        struct bnxt_sw_rx_bd *rx_buf;
1151        unsigned int len;
1152        u8 *data, agg_bufs, cmp_type;
1153        dma_addr_t dma_addr;
1154        struct sk_buff *skb;
1155        int rc = 0;
1156
1157        rxcmp = (struct rx_cmp *)
1158                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1159
1160        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1161        cp_cons = RING_CMP(tmp_raw_cons);
1162        rxcmp1 = (struct rx_cmp_ext *)
1163                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1164
1165        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1166                return -EBUSY;
1167
1168        cmp_type = RX_CMP_TYPE(rxcmp);
1169
1170        prod = rxr->rx_prod;
1171
1172        if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1173                bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1174                               (struct rx_tpa_start_cmp_ext *)rxcmp1);
1175
1176                goto next_rx_no_prod;
1177
1178        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1179                skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1180                                   (struct rx_tpa_end_cmp *)rxcmp,
1181                                   (struct rx_tpa_end_cmp_ext *)rxcmp1,
1182                                   agg_event);
1183
1184                if (unlikely(IS_ERR(skb)))
1185                        return -EBUSY;
1186
1187                rc = -ENOMEM;
1188                if (likely(skb)) {
1189                        skb_record_rx_queue(skb, bnapi->index);
1190                        skb_mark_napi_id(skb, &bnapi->napi);
1191                        if (bnxt_busy_polling(bnapi))
1192                                netif_receive_skb(skb);
1193                        else
1194                                napi_gro_receive(&bnapi->napi, skb);
1195                        rc = 1;
1196                }
1197                goto next_rx_no_prod;
1198        }
1199
1200        cons = rxcmp->rx_cmp_opaque;
1201        rx_buf = &rxr->rx_buf_ring[cons];
1202        data = rx_buf->data;
1203        if (unlikely(cons != rxr->rx_next_cons)) {
1204                int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1205
1206                bnxt_sched_reset(bp, rxr);
1207                return rc1;
1208        }
1209        prefetch(data);
1210
1211        agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
1212                                RX_CMP_AGG_BUFS_SHIFT;
1213
1214        if (agg_bufs) {
1215                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1216                        return -EBUSY;
1217
1218                cp_cons = NEXT_CMP(cp_cons);
1219                *agg_event = true;
1220        }
1221
1222        rx_buf->data = NULL;
1223        if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1224                bnxt_reuse_rx_data(rxr, cons, data);
1225                if (agg_bufs)
1226                        bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1227
1228                rc = -EIO;
1229                goto next_rx;
1230        }
1231
1232        len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1233        dma_addr = dma_unmap_addr(rx_buf, mapping);
1234
1235        if (len <= bp->rx_copy_thresh) {
1236                skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
1237                bnxt_reuse_rx_data(rxr, cons, data);
1238                if (!skb) {
1239                        rc = -ENOMEM;
1240                        goto next_rx;
1241                }
1242        } else {
1243                skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
1244                if (!skb) {
1245                        rc = -ENOMEM;
1246                        goto next_rx;
1247                }
1248        }
1249
1250        if (agg_bufs) {
1251                skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1252                if (!skb) {
1253                        rc = -ENOMEM;
1254                        goto next_rx;
1255                }
1256        }
1257
1258        if (RX_CMP_HASH_VALID(rxcmp)) {
1259                u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1260                enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1261
1262                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1263                if (hash_type != 1 && hash_type != 3)
1264                        type = PKT_HASH_TYPE_L3;
1265                skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1266        }
1267
1268        skb->protocol = eth_type_trans(skb, dev);
1269
1270        if (rxcmp1->rx_cmp_flags2 &
1271            cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
1272                netdev_features_t features = skb->dev->features;
1273                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1274                u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1275
1276                if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1277                     vlan_proto == ETH_P_8021Q) ||
1278                    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
1279                     vlan_proto == ETH_P_8021AD))
1280                        __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
1281                                               meta_data &
1282                                               RX_CMP_FLAGS2_METADATA_VID_MASK);
1283        }
1284
1285        skb_checksum_none_assert(skb);
1286        if (RX_CMP_L4_CS_OK(rxcmp1)) {
1287                if (dev->features & NETIF_F_RXCSUM) {
1288                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1289                        skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1290                }
1291        } else {
1292                if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1293                        if (dev->features & NETIF_F_RXCSUM)
1294                                cpr->rx_l4_csum_errors++;
1295                }
1296        }
1297
1298        skb_record_rx_queue(skb, bnapi->index);
1299        skb_mark_napi_id(skb, &bnapi->napi);
1300        if (bnxt_busy_polling(bnapi))
1301                netif_receive_skb(skb);
1302        else
1303                napi_gro_receive(&bnapi->napi, skb);
1304        rc = 1;
1305
1306next_rx:
1307        rxr->rx_prod = NEXT_RX(prod);
1308        rxr->rx_next_cons = NEXT_RX(cons);
1309
1310next_rx_no_prod:
1311        *raw_cons = tmp_raw_cons;
1312
1313        return rc;
1314}
1315
1316static int bnxt_async_event_process(struct bnxt *bp,
1317                                    struct hwrm_async_event_cmpl *cmpl)
1318{
1319        u16 event_id = le16_to_cpu(cmpl->event_id);
1320
 1321        /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1322        switch (event_id) {
1323        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1324                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1325                break;
1326        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1327                set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1328                break;
1329        default:
1330                netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
1331                           event_id);
1332                goto async_event_process_exit;
1333        }
1334        schedule_work(&bp->sp_task);
1335async_event_process_exit:
1336        return 0;
1337}
1338
1339static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1340{
1341        u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1342        struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1343        struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1344                                (struct hwrm_fwd_req_cmpl *)txcmp;
1345
1346        switch (cmpl_type) {
1347        case CMPL_BASE_TYPE_HWRM_DONE:
1348                seq_id = le16_to_cpu(h_cmpl->sequence_id);
1349                if (seq_id == bp->hwrm_intr_seq_id)
1350                        bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1351                else
1352                        netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1353                break;
1354
1355        case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1356                vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1357
1358                if ((vf_id < bp->pf.first_vf_id) ||
1359                    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1360                        netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1361                                   vf_id);
1362                        return -EINVAL;
1363                }
1364
1365                set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1366                set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1367                schedule_work(&bp->sp_task);
1368                break;
1369
1370        case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1371                bnxt_async_event_process(bp,
1372                                         (struct hwrm_async_event_cmpl *)txcmp);
1373
1374        default:
1375                break;
1376        }
1377
1378        return 0;
1379}
1380
1381static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1382{
1383        struct bnxt_napi *bnapi = dev_instance;
1384        struct bnxt *bp = bnapi->bp;
1385        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1386        u32 cons = RING_CMP(cpr->cp_raw_cons);
1387
1388        prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1389        napi_schedule(&bnapi->napi);
1390        return IRQ_HANDLED;
1391}
1392
1393static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1394{
1395        u32 raw_cons = cpr->cp_raw_cons;
1396        u16 cons = RING_CMP(raw_cons);
1397        struct tx_cmp *txcmp;
1398
1399        txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1400
1401        return TX_CMP_VALID(txcmp, raw_cons);
1402}
1403
1404static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1405{
1406        struct bnxt_napi *bnapi = dev_instance;
1407        struct bnxt *bp = bnapi->bp;
1408        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1409        u32 cons = RING_CMP(cpr->cp_raw_cons);
1410        u32 int_status;
1411
1412        prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1413
1414        if (!bnxt_has_work(bp, cpr)) {
1415                int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1416                /* return if erroneous interrupt */
1417                if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1418                        return IRQ_NONE;
1419        }
1420
1421        /* disable ring IRQ */
1422        BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1423
1424        /* Return here if interrupt is shared and is disabled. */
1425        if (unlikely(atomic_read(&bp->intr_sem) != 0))
1426                return IRQ_HANDLED;
1427
1428        napi_schedule(&bnapi->napi);
1429        return IRQ_HANDLED;
1430}
1431
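/* Service up to @budget completions on this ring: count TX completions,
 * hand RX packets to bnxt_rx_pkt() and HWRM completions to
 * bnxt_hwrm_handler(), then ring the doorbells for any rings that made
 * progress.
 */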
1432static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1433{
1434        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1435        u32 raw_cons = cpr->cp_raw_cons;
1436        u32 cons;
1437        int tx_pkts = 0;
1438        int rx_pkts = 0;
1439        bool rx_event = false;
1440        bool agg_event = false;
1441        struct tx_cmp *txcmp;
1442
1443        while (1) {
1444                int rc;
1445
1446                cons = RING_CMP(raw_cons);
1447                txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1448
1449                if (!TX_CMP_VALID(txcmp, raw_cons))
1450                        break;
1451
1452                /* The valid test of the entry must be done before
1453                 * reading any of its other fields.
1454                 */
1455                rmb();
1456                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1457                        tx_pkts++;
1458                        /* return full budget so NAPI will complete. */
1459                        if (unlikely(tx_pkts > bp->tx_wake_thresh))
1460                                rx_pkts = budget;
1461                } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1462                        rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1463                        if (likely(rc >= 0))
1464                                rx_pkts += rc;
1465                        else if (rc == -EBUSY)  /* partial completion */
1466                                break;
1467                        rx_event = true;
1468                } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1469                                     CMPL_BASE_TYPE_HWRM_DONE) ||
1470                                    (TX_CMP_TYPE(txcmp) ==
1471                                     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1472                                    (TX_CMP_TYPE(txcmp) ==
1473                                     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1474                        bnxt_hwrm_handler(bp, txcmp);
1475                }
1476                raw_cons = NEXT_RAW_CMP(raw_cons);
1477
1478                if (rx_pkts == budget)
1479                        break;
1480        }
1481
1482        cpr->cp_raw_cons = raw_cons;
1483        /* ACK completion ring before freeing tx ring and producing new
1484         * buffers in rx/agg rings to prevent overflowing the completion
1485         * ring.
1486         */
1487        BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1488
1489        if (tx_pkts)
1490                bnxt_tx_int(bp, bnapi, tx_pkts);
1491
1492        if (rx_event) {
1493                struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1494
1495                writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1496                writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1497                if (agg_event) {
1498                        writel(DB_KEY_RX | rxr->rx_agg_prod,
1499                               rxr->rx_agg_doorbell);
1500                        writel(DB_KEY_RX | rxr->rx_agg_prod,
1501                               rxr->rx_agg_doorbell);
1502                }
1503        }
1504        return rx_pkts;
1505}
1506
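/* NAPI poll callback: keep calling bnxt_poll_work() until the budget is
 * exhausted or no work remains, then complete NAPI and re-arm the
 * completion ring doorbell.
 */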
1507static int bnxt_poll(struct napi_struct *napi, int budget)
1508{
1509        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1510        struct bnxt *bp = bnapi->bp;
1511        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1512        int work_done = 0;
1513
1514        if (!bnxt_lock_napi(bnapi))
1515                return budget;
1516
1517        while (1) {
1518                work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1519
1520                if (work_done >= budget)
1521                        break;
1522
1523                if (!bnxt_has_work(bp, cpr)) {
1524                        napi_complete(napi);
1525                        BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1526                        break;
1527                }
1528        }
1529        mmiowb();
1530        bnxt_unlock_napi(bnapi);
1531        return work_done;
1532}
1533
1534#ifdef CONFIG_NET_RX_BUSY_POLL
1535static int bnxt_busy_poll(struct napi_struct *napi)
1536{
1537        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1538        struct bnxt *bp = bnapi->bp;
1539        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1540        int rx_work, budget = 4;
1541
1542        if (atomic_read(&bp->intr_sem) != 0)
1543                return LL_FLUSH_FAILED;
1544
1545        if (!bnxt_lock_poll(bnapi))
1546                return LL_FLUSH_BUSY;
1547
1548        rx_work = bnxt_poll_work(bp, bnapi, budget);
1549
1550        BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1551
1552        bnxt_unlock_poll(bnapi);
1553        return rx_work;
1554}
1555#endif
1556
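/* Unmap and free any SKBs still sitting in the TX rings.  Push packets
 * occupy two descriptors and carry no DMA mapping; normal packets have the
 * linear part and each fragment unmapped before the SKB is freed.
 */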
1557static void bnxt_free_tx_skbs(struct bnxt *bp)
1558{
1559        int i, max_idx;
1560        struct pci_dev *pdev = bp->pdev;
1561
1562        if (!bp->tx_ring)
1563                return;
1564
1565        max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1566        for (i = 0; i < bp->tx_nr_rings; i++) {
1567                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1568                int j;
1569
1570                for (j = 0; j < max_idx;) {
1571                        struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1572                        struct sk_buff *skb = tx_buf->skb;
1573                        int k, last;
1574
1575                        if (!skb) {
1576                                j++;
1577                                continue;
1578                        }
1579
1580                        tx_buf->skb = NULL;
1581
1582                        if (tx_buf->is_push) {
1583                                dev_kfree_skb(skb);
1584                                j += 2;
1585                                continue;
1586                        }
1587
1588                        dma_unmap_single(&pdev->dev,
1589                                         dma_unmap_addr(tx_buf, mapping),
1590                                         skb_headlen(skb),
1591                                         PCI_DMA_TODEVICE);
1592
1593                        last = tx_buf->nr_frags;
1594                        j += 2;
1595                        for (k = 0; k < last; k++, j++) {
1596                                int ring_idx = j & bp->tx_ring_mask;
1597                                skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1598
1599                                tx_buf = &txr->tx_buf_ring[ring_idx];
1600                                dma_unmap_page(
1601                                        &pdev->dev,
1602                                        dma_unmap_addr(tx_buf, mapping),
1603                                        skb_frag_size(frag), PCI_DMA_TODEVICE);
1604                        }
1605                        dev_kfree_skb(skb);
1606                }
1607                netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1608        }
1609}
1610
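/* Unmap and free all RX, RX aggregation and TPA buffers, plus the page
 * cached in rxr->rx_page.
 */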
1611static void bnxt_free_rx_skbs(struct bnxt *bp)
1612{
1613        int i, max_idx, max_agg_idx;
1614        struct pci_dev *pdev = bp->pdev;
1615
1616        if (!bp->rx_ring)
1617                return;
1618
1619        max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1620        max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1621        for (i = 0; i < bp->rx_nr_rings; i++) {
1622                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1623                int j;
1624
1625                if (rxr->rx_tpa) {
1626                        for (j = 0; j < MAX_TPA; j++) {
1627                                struct bnxt_tpa_info *tpa_info =
1628                                                        &rxr->rx_tpa[j];
1629                                u8 *data = tpa_info->data;
1630
1631                                if (!data)
1632                                        continue;
1633
1634                                dma_unmap_single(
1635                                        &pdev->dev,
1636                                        dma_unmap_addr(tpa_info, mapping),
1637                                        bp->rx_buf_use_size,
1638                                        PCI_DMA_FROMDEVICE);
1639
1640                                tpa_info->data = NULL;
1641
1642                                kfree(data);
1643                        }
1644                }
1645
1646                for (j = 0; j < max_idx; j++) {
1647                        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1648                        u8 *data = rx_buf->data;
1649
1650                        if (!data)
1651                                continue;
1652
1653                        dma_unmap_single(&pdev->dev,
1654                                         dma_unmap_addr(rx_buf, mapping),
1655                                         bp->rx_buf_use_size,
1656                                         PCI_DMA_FROMDEVICE);
1657
1658                        rx_buf->data = NULL;
1659
1660                        kfree(data);
1661                }
1662
1663                for (j = 0; j < max_agg_idx; j++) {
1664                        struct bnxt_sw_rx_agg_bd *rx_agg_buf =
1665                                &rxr->rx_agg_ring[j];
1666                        struct page *page = rx_agg_buf->page;
1667
1668                        if (!page)
1669                                continue;
1670
1671                        dma_unmap_page(&pdev->dev,
1672                                       dma_unmap_addr(rx_agg_buf, mapping),
1673                                       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
1674
1675                        rx_agg_buf->page = NULL;
1676                        __clear_bit(j, rxr->rx_agg_bmap);
1677
1678                        __free_page(page);
1679                }
1680                if (rxr->rx_page) {
1681                        __free_page(rxr->rx_page);
1682                        rxr->rx_page = NULL;
1683                }
1684        }
1685}
1686
1687static void bnxt_free_skbs(struct bnxt *bp)
1688{
1689        bnxt_free_tx_skbs(bp);
1690        bnxt_free_rx_skbs(bp);
1691}
1692
1693static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1694{
1695        struct pci_dev *pdev = bp->pdev;
1696        int i;
1697
1698        for (i = 0; i < ring->nr_pages; i++) {
1699                if (!ring->pg_arr[i])
1700                        continue;
1701
1702                dma_free_coherent(&pdev->dev, ring->page_size,
1703                                  ring->pg_arr[i], ring->dma_arr[i]);
1704
1705                ring->pg_arr[i] = NULL;
1706        }
1707        if (ring->pg_tbl) {
1708                dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
1709                                  ring->pg_tbl, ring->pg_tbl_map);
1710                ring->pg_tbl = NULL;
1711        }
1712        if (ring->vmem_size && *ring->vmem) {
1713                vfree(*ring->vmem);
1714                *ring->vmem = NULL;
1715        }
1716}
1717
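/* Allocate the coherent DMA pages backing one hardware ring.  Rings that
 * span more than one page also get a page table holding the DMA address of
 * each page, and rings with vmem_size set get a vzalloc'ed software ring.
 */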
1718static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1719{
1720        int i;
1721        struct pci_dev *pdev = bp->pdev;
1722
1723        if (ring->nr_pages > 1) {
1724                ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
1725                                                  ring->nr_pages * 8,
1726                                                  &ring->pg_tbl_map,
1727                                                  GFP_KERNEL);
1728                if (!ring->pg_tbl)
1729                        return -ENOMEM;
1730        }
1731
1732        for (i = 0; i < ring->nr_pages; i++) {
1733                ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1734                                                     ring->page_size,
1735                                                     &ring->dma_arr[i],
1736                                                     GFP_KERNEL);
1737                if (!ring->pg_arr[i])
1738                        return -ENOMEM;
1739
1740                if (ring->nr_pages > 1)
1741                        ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
1742        }
1743
1744        if (ring->vmem_size) {
1745                *ring->vmem = vzalloc(ring->vmem_size);
1746                if (!(*ring->vmem))
1747                        return -ENOMEM;
1748        }
1749        return 0;
1750}
1751
1752static void bnxt_free_rx_rings(struct bnxt *bp)
1753{
1754        int i;
1755
1756        if (!bp->rx_ring)
1757                return;
1758
1759        for (i = 0; i < bp->rx_nr_rings; i++) {
1760                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1761                struct bnxt_ring_struct *ring;
1762
1763                kfree(rxr->rx_tpa);
1764                rxr->rx_tpa = NULL;
1765
1766                kfree(rxr->rx_agg_bmap);
1767                rxr->rx_agg_bmap = NULL;
1768
1769                ring = &rxr->rx_ring_struct;
1770                bnxt_free_ring(bp, ring);
1771
1772                ring = &rxr->rx_agg_ring_struct;
1773                bnxt_free_ring(bp, ring);
1774        }
1775}
1776
1777static int bnxt_alloc_rx_rings(struct bnxt *bp)
1778{
1779        int i, rc, agg_rings = 0, tpa_rings = 0;
1780
1781        if (!bp->rx_ring)
1782                return -ENOMEM;
1783
1784        if (bp->flags & BNXT_FLAG_AGG_RINGS)
1785                agg_rings = 1;
1786
1787        if (bp->flags & BNXT_FLAG_TPA)
1788                tpa_rings = 1;
1789
1790        for (i = 0; i < bp->rx_nr_rings; i++) {
1791                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1792                struct bnxt_ring_struct *ring;
1793
1794                ring = &rxr->rx_ring_struct;
1795
1796                rc = bnxt_alloc_ring(bp, ring);
1797                if (rc)
1798                        return rc;
1799
1800                if (agg_rings) {
1801                        u16 mem_size;
1802
1803                        ring = &rxr->rx_agg_ring_struct;
1804                        rc = bnxt_alloc_ring(bp, ring);
1805                        if (rc)
1806                                return rc;
1807
1808                        rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
1809                        mem_size = rxr->rx_agg_bmap_size / 8;
1810                        rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
1811                        if (!rxr->rx_agg_bmap)
1812                                return -ENOMEM;
1813
1814                        if (tpa_rings) {
1815                                rxr->rx_tpa = kcalloc(MAX_TPA,
1816                                                sizeof(struct bnxt_tpa_info),
1817                                                GFP_KERNEL);
1818                                if (!rxr->rx_tpa)
1819                                        return -ENOMEM;
1820                        }
1821                }
1822        }
1823        return 0;
1824}
1825
1826static void bnxt_free_tx_rings(struct bnxt *bp)
1827{
1828        int i;
1829        struct pci_dev *pdev = bp->pdev;
1830
1831        if (!bp->tx_ring)
1832                return;
1833
1834        for (i = 0; i < bp->tx_nr_rings; i++) {
1835                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1836                struct bnxt_ring_struct *ring;
1837
1838                if (txr->tx_push) {
1839                        dma_free_coherent(&pdev->dev, bp->tx_push_size,
1840                                          txr->tx_push, txr->tx_push_mapping);
1841                        txr->tx_push = NULL;
1842                }
1843
1844                ring = &txr->tx_ring_struct;
1845
1846                bnxt_free_ring(bp, ring);
1847        }
1848}
1849
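/* Allocate the TX descriptor rings and, when TX push is enabled, a small
 * coherent buffer per ring (push BD header plus up to tx_push_thresh bytes
 * of data) used to build push packets.  Push is disabled if that buffer
 * would exceed 256 bytes.
 */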
1850static int bnxt_alloc_tx_rings(struct bnxt *bp)
1851{
1852        int i, j, rc;
1853        struct pci_dev *pdev = bp->pdev;
1854
1855        bp->tx_push_size = 0;
1856        if (bp->tx_push_thresh) {
1857                int push_size;
1858
1859                push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1860                                        bp->tx_push_thresh);
1861
1862                if (push_size > 256) {
1863                        push_size = 0;
1864                        bp->tx_push_thresh = 0;
1865                }
1866
1867                bp->tx_push_size = push_size;
1868        }
1869
1870        for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
1871                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1872                struct bnxt_ring_struct *ring;
1873
1874                ring = &txr->tx_ring_struct;
1875
1876                rc = bnxt_alloc_ring(bp, ring);
1877                if (rc)
1878                        return rc;
1879
1880                if (bp->tx_push_size) {
1881                        dma_addr_t mapping;
1882
1883                        /* One pre-allocated DMA buffer to back up
1884                         * the TX push operation
1885                         */
1886                        txr->tx_push = dma_alloc_coherent(&pdev->dev,
1887                                                bp->tx_push_size,
1888                                                &txr->tx_push_mapping,
1889                                                GFP_KERNEL);
1890
1891                        if (!txr->tx_push)
1892                                return -ENOMEM;
1893
1894                        mapping = txr->tx_push_mapping +
1895                                sizeof(struct tx_push_bd);
1896                        txr->data_mapping = cpu_to_le64(mapping);
1897
1898                        memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
1899                }
1900                ring->queue_id = bp->q_info[j].queue_id;
1901                if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
1902                        j++;
1903        }
1904        return 0;
1905}
1906
1907static void bnxt_free_cp_rings(struct bnxt *bp)
1908{
1909        int i;
1910
1911        if (!bp->bnapi)
1912                return;
1913
1914        for (i = 0; i < bp->cp_nr_rings; i++) {
1915                struct bnxt_napi *bnapi = bp->bnapi[i];
1916                struct bnxt_cp_ring_info *cpr;
1917                struct bnxt_ring_struct *ring;
1918
1919                if (!bnapi)
1920                        continue;
1921
1922                cpr = &bnapi->cp_ring;
1923                ring = &cpr->cp_ring_struct;
1924
1925                bnxt_free_ring(bp, ring);
1926        }
1927}
1928
1929static int bnxt_alloc_cp_rings(struct bnxt *bp)
1930{
1931        int i, rc;
1932
1933        for (i = 0; i < bp->cp_nr_rings; i++) {
1934                struct bnxt_napi *bnapi = bp->bnapi[i];
1935                struct bnxt_cp_ring_info *cpr;
1936                struct bnxt_ring_struct *ring;
1937
1938                if (!bnapi)
1939                        continue;
1940
1941                cpr = &bnapi->cp_ring;
1942                ring = &cpr->cp_ring_struct;
1943
1944                rc = bnxt_alloc_ring(bp, ring);
1945                if (rc)
1946                        return rc;
1947        }
1948        return 0;
1949}
1950
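/* Point each bnxt_ring_struct at the completion, RX, RX aggregation and TX
 * descriptor arrays owned by its NAPI instance and record the page counts
 * and software-ring sizes used later by bnxt_alloc_ring().
 */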
1951static void bnxt_init_ring_struct(struct bnxt *bp)
1952{
1953        int i;
1954
1955        for (i = 0; i < bp->cp_nr_rings; i++) {
1956                struct bnxt_napi *bnapi = bp->bnapi[i];
1957                struct bnxt_cp_ring_info *cpr;
1958                struct bnxt_rx_ring_info *rxr;
1959                struct bnxt_tx_ring_info *txr;
1960                struct bnxt_ring_struct *ring;
1961
1962                if (!bnapi)
1963                        continue;
1964
1965                cpr = &bnapi->cp_ring;
1966                ring = &cpr->cp_ring_struct;
1967                ring->nr_pages = bp->cp_nr_pages;
1968                ring->page_size = HW_CMPD_RING_SIZE;
1969                ring->pg_arr = (void **)cpr->cp_desc_ring;
1970                ring->dma_arr = cpr->cp_desc_mapping;
1971                ring->vmem_size = 0;
1972
1973                rxr = bnapi->rx_ring;
1974                if (!rxr)
1975                        goto skip_rx;
1976
1977                ring = &rxr->rx_ring_struct;
1978                ring->nr_pages = bp->rx_nr_pages;
1979                ring->page_size = HW_RXBD_RING_SIZE;
1980                ring->pg_arr = (void **)rxr->rx_desc_ring;
1981                ring->dma_arr = rxr->rx_desc_mapping;
1982                ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
1983                ring->vmem = (void **)&rxr->rx_buf_ring;
1984
1985                ring = &rxr->rx_agg_ring_struct;
1986                ring->nr_pages = bp->rx_agg_nr_pages;
1987                ring->page_size = HW_RXBD_RING_SIZE;
1988                ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
1989                ring->dma_arr = rxr->rx_agg_desc_mapping;
1990                ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
1991                ring->vmem = (void **)&rxr->rx_agg_ring;
1992
1993skip_rx:
1994                txr = bnapi->tx_ring;
1995                if (!txr)
1996                        continue;
1997
1998                ring = &txr->tx_ring_struct;
1999                ring->nr_pages = bp->tx_nr_pages;
2000                ring->page_size = HW_RXBD_RING_SIZE;
2001                ring->pg_arr = (void **)txr->tx_desc_ring;
2002                ring->dma_arr = txr->tx_desc_mapping;
2003                ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2004                ring->vmem = (void **)&txr->tx_buf_ring;
2005        }
2006}
2007
2008static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2009{
2010        int i;
2011        u32 prod;
2012        struct rx_bd **rx_buf_ring;
2013
2014        rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2015        for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2016                int j;
2017                struct rx_bd *rxbd;
2018
2019                rxbd = rx_buf_ring[i];
2020                if (!rxbd)
2021                        continue;
2022
2023                for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2024                        rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2025                        rxbd->rx_bd_opaque = prod;
2026                }
2027        }
2028}
2029
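/* Fill one RX ring (and its aggregation ring, if enabled) with freshly
 * allocated buffers and, when TPA is active, pre-allocate one buffer per
 * TPA slot.
 */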
2030static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2031{
2032        struct net_device *dev = bp->dev;
2033        struct bnxt_rx_ring_info *rxr;
2034        struct bnxt_ring_struct *ring;
2035        u32 prod, type;
2036        int i;
2037
2038        type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2039                RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2040
2041        if (NET_IP_ALIGN == 2)
2042                type |= RX_BD_FLAGS_SOP;
2043
2044        rxr = &bp->rx_ring[ring_nr];
2045        ring = &rxr->rx_ring_struct;
2046        bnxt_init_rxbd_pages(ring, type);
2047
2048        prod = rxr->rx_prod;
2049        for (i = 0; i < bp->rx_ring_size; i++) {
2050                if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2051                        netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2052                                    ring_nr, i, bp->rx_ring_size);
2053                        break;
2054                }
2055                prod = NEXT_RX(prod);
2056        }
2057        rxr->rx_prod = prod;
2058        ring->fw_ring_id = INVALID_HW_RING_ID;
2059
2060        ring = &rxr->rx_agg_ring_struct;
2061        ring->fw_ring_id = INVALID_HW_RING_ID;
2062
2063        if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2064                return 0;
2065
2066        type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2067                RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2068
2069        bnxt_init_rxbd_pages(ring, type);
2070
2071        prod = rxr->rx_agg_prod;
2072        for (i = 0; i < bp->rx_agg_ring_size; i++) {
2073                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2074                        netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2075                                    ring_nr, i, bp->rx_agg_ring_size);
2076                        break;
2077                }
2078                prod = NEXT_RX_AGG(prod);
2079        }
2080        rxr->rx_agg_prod = prod;
2081
2082        if (bp->flags & BNXT_FLAG_TPA) {
2083                if (rxr->rx_tpa) {
2084                        u8 *data;
2085                        dma_addr_t mapping;
2086
2087                        for (i = 0; i < MAX_TPA; i++) {
2088                                data = __bnxt_alloc_rx_data(bp, &mapping,
2089                                                            GFP_KERNEL);
2090                                if (!data)
2091                                        return -ENOMEM;
2092
2093                                rxr->rx_tpa[i].data = data;
2094                                rxr->rx_tpa[i].mapping = mapping;
2095                        }
2096                } else {
2097                        netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2098                        return -ENOMEM;
2099                }
2100        }
2101
2102        return 0;
2103}
2104
2105static int bnxt_init_rx_rings(struct bnxt *bp)
2106{
2107        int i, rc = 0;
2108
2109        for (i = 0; i < bp->rx_nr_rings; i++) {
2110                rc = bnxt_init_one_rx_ring(bp, i);
2111                if (rc)
2112                        break;
2113        }
2114
2115        return rc;
2116}
2117
2118static int bnxt_init_tx_rings(struct bnxt *bp)
2119{
2120        u16 i;
2121
2122        bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2123                                   MAX_SKB_FRAGS + 1);
2124
2125        for (i = 0; i < bp->tx_nr_rings; i++) {
2126                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2127                struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2128
2129                ring->fw_ring_id = INVALID_HW_RING_ID;
2130        }
2131
2132        return 0;
2133}
2134
2135static void bnxt_free_ring_grps(struct bnxt *bp)
2136{
2137        kfree(bp->grp_info);
2138        bp->grp_info = NULL;
2139}
2140
2141static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2142{
2143        int i;
2144
2145        if (irq_re_init) {
2146                bp->grp_info = kcalloc(bp->cp_nr_rings,
2147                                       sizeof(struct bnxt_ring_grp_info),
2148                                       GFP_KERNEL);
2149                if (!bp->grp_info)
2150                        return -ENOMEM;
2151        }
2152        for (i = 0; i < bp->cp_nr_rings; i++) {
2153                if (irq_re_init)
2154                        bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2155                bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2156                bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2157                bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2158                bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2159        }
2160        return 0;
2161}
2162
2163static void bnxt_free_vnics(struct bnxt *bp)
2164{
2165        kfree(bp->vnic_info);
2166        bp->vnic_info = NULL;
2167        bp->nr_vnics = 0;
2168}
2169
2170static int bnxt_alloc_vnics(struct bnxt *bp)
2171{
2172        int num_vnics = 1;
2173
2174#ifdef CONFIG_RFS_ACCEL
2175        if (bp->flags & BNXT_FLAG_RFS)
2176                num_vnics += bp->rx_nr_rings;
2177#endif
2178
2179        bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2180                                GFP_KERNEL);
2181        if (!bp->vnic_info)
2182                return -ENOMEM;
2183
2184        bp->nr_vnics = num_vnics;
2185        return 0;
2186}
2187
2188static void bnxt_init_vnics(struct bnxt *bp)
2189{
2190        int i;
2191
2192        for (i = 0; i < bp->nr_vnics; i++) {
2193                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2194
2195                vnic->fw_vnic_id = INVALID_HW_RING_ID;
2196                vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
2197                vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2198
2199                if (bp->vnic_info[i].rss_hash_key) {
2200                        if (i == 0)
2201                                prandom_bytes(vnic->rss_hash_key,
2202                                              HW_HASH_KEY_SIZE);
2203                        else
2204                                memcpy(vnic->rss_hash_key,
2205                                       bp->vnic_info[0].rss_hash_key,
2206                                       HW_HASH_KEY_SIZE);
2207                }
2208        }
2209}
2210
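/* Return the number of descriptor pages to use for a ring of @ring_size
 * entries, rounded up to a power of two (e.g. ring_size == desc_per_pg + 1
 * results in 2 pages).
 */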
2211static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2212{
2213        int pages;
2214
2215        pages = ring_size / desc_per_pg;
2216
2217        if (!pages)
2218                return 1;
2219
2220        pages++;
2221
2222        while (pages & (pages - 1))
2223                pages++;
2224
2225        return pages;
2226}
2227
2228static void bnxt_set_tpa_flags(struct bnxt *bp)
2229{
2230        bp->flags &= ~BNXT_FLAG_TPA;
2231        if (bp->dev->features & NETIF_F_LRO)
2232                bp->flags |= BNXT_FLAG_LRO;
2233        if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
2234                bp->flags |= BNXT_FLAG_GRO;
2235}
2236
2237/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2238 * be set on entry.
2239 */
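/* The RX buffer size is derived from the MTU; if the buffer would exceed
 * PAGE_SIZE, or TPA is enabled, aggregation rings are used and the RX
 * buffer shrinks back to the copy-threshold size.  The completion ring is
 * sized for (2 + agg_factor) entries per RX descriptor plus one per TX
 * descriptor.
 */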
2240void bnxt_set_ring_params(struct bnxt *bp)
2241{
2242        u32 ring_size, rx_size, rx_space;
2243        u32 agg_factor = 0, agg_ring_size = 0;
2244
2245        /* 8 for CRC and VLAN */
2246        rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2247
2248        rx_space = rx_size + NET_SKB_PAD +
2249                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2250
2251        bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2252        ring_size = bp->rx_ring_size;
2253        bp->rx_agg_ring_size = 0;
2254        bp->rx_agg_nr_pages = 0;
2255
2256        if (bp->flags & BNXT_FLAG_TPA)
2257                agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2258
2259        bp->flags &= ~BNXT_FLAG_JUMBO;
2260        if (rx_space > PAGE_SIZE) {
2261                u32 jumbo_factor;
2262
2263                bp->flags |= BNXT_FLAG_JUMBO;
2264                jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2265                if (jumbo_factor > agg_factor)
2266                        agg_factor = jumbo_factor;
2267        }
2268        agg_ring_size = ring_size * agg_factor;
2269
2270        if (agg_ring_size) {
2271                bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2272                                                        RX_DESC_CNT);
2273                if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2274                        u32 tmp = agg_ring_size;
2275
2276                        bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2277                        agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2278                        netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2279                                    tmp, agg_ring_size);
2280                }
2281                bp->rx_agg_ring_size = agg_ring_size;
2282                bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2283                rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2284                rx_space = rx_size + NET_SKB_PAD +
2285                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2286        }
2287
2288        bp->rx_buf_use_size = rx_size;
2289        bp->rx_buf_size = rx_space;
2290
2291        bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2292        bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2293
2294        ring_size = bp->tx_ring_size;
2295        bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2296        bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2297
2298        ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2299        bp->cp_ring_size = ring_size;
2300
2301        bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2302        if (bp->cp_nr_pages > MAX_CP_PAGES) {
2303                bp->cp_nr_pages = MAX_CP_PAGES;
2304                bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2305                netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2306                            ring_size, bp->cp_ring_size);
2307        }
2308        bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2309        bp->cp_ring_mask = bp->cp_bit - 1;
2310}
2311
2312static void bnxt_free_vnic_attributes(struct bnxt *bp)
2313{
2314        int i;
2315        struct bnxt_vnic_info *vnic;
2316        struct pci_dev *pdev = bp->pdev;
2317
2318        if (!bp->vnic_info)
2319                return;
2320
2321        for (i = 0; i < bp->nr_vnics; i++) {
2322                vnic = &bp->vnic_info[i];
2323
2324                kfree(vnic->fw_grp_ids);
2325                vnic->fw_grp_ids = NULL;
2326
2327                kfree(vnic->uc_list);
2328                vnic->uc_list = NULL;
2329
2330                if (vnic->mc_list) {
2331                        dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2332                                          vnic->mc_list, vnic->mc_list_mapping);
2333                        vnic->mc_list = NULL;
2334                }
2335
2336                if (vnic->rss_table) {
2337                        dma_free_coherent(&pdev->dev, PAGE_SIZE,
2338                                          vnic->rss_table,
2339                                          vnic->rss_table_dma_addr);
2340                        vnic->rss_table = NULL;
2341                }
2342
2343                vnic->rss_hash_key = NULL;
2344                vnic->flags = 0;
2345        }
2346}
2347
2348static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2349{
2350        int i, rc = 0, size;
2351        struct bnxt_vnic_info *vnic;
2352        struct pci_dev *pdev = bp->pdev;
2353        int max_rings;
2354
2355        for (i = 0; i < bp->nr_vnics; i++) {
2356                vnic = &bp->vnic_info[i];
2357
2358                if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2359                        int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2360
2361                        if (mem_size > 0) {
2362                                vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2363                                if (!vnic->uc_list) {
2364                                        rc = -ENOMEM;
2365                                        goto out;
2366                                }
2367                        }
2368                }
2369
2370                if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2371                        vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2372                        vnic->mc_list =
2373                                dma_alloc_coherent(&pdev->dev,
2374                                                   vnic->mc_list_size,
2375                                                   &vnic->mc_list_mapping,
2376                                                   GFP_KERNEL);
2377                        if (!vnic->mc_list) {
2378                                rc = -ENOMEM;
2379                                goto out;
2380                        }
2381                }
2382
2383                if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2384                        max_rings = bp->rx_nr_rings;
2385                else
2386                        max_rings = 1;
2387
2388                vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2389                if (!vnic->fw_grp_ids) {
2390                        rc = -ENOMEM;
2391                        goto out;
2392                }
2393
2394                /* Allocate rss table and hash key */
2395                vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2396                                                     &vnic->rss_table_dma_addr,
2397                                                     GFP_KERNEL);
2398                if (!vnic->rss_table) {
2399                        rc = -ENOMEM;
2400                        goto out;
2401                }
2402
2403                size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2404
2405                vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2406                vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2407        }
2408        return 0;
2409
2410out:
2411        return rc;
2412}
2413
2414static void bnxt_free_hwrm_resources(struct bnxt *bp)
2415{
2416        struct pci_dev *pdev = bp->pdev;
2417
2418        dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2419                          bp->hwrm_cmd_resp_dma_addr);
2420
2421        bp->hwrm_cmd_resp_addr = NULL;
2422        if (bp->hwrm_dbg_resp_addr) {
2423                dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2424                                  bp->hwrm_dbg_resp_addr,
2425                                  bp->hwrm_dbg_resp_dma_addr);
2426
2427                bp->hwrm_dbg_resp_addr = NULL;
2428        }
2429}
2430
2431static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2432{
2433        struct pci_dev *pdev = bp->pdev;
2434
2435        bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2436                                                   &bp->hwrm_cmd_resp_dma_addr,
2437                                                   GFP_KERNEL);
2438        if (!bp->hwrm_cmd_resp_addr)
2439                return -ENOMEM;
2440        bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2441                                                    HWRM_DBG_REG_BUF_SIZE,
2442                                                    &bp->hwrm_dbg_resp_dma_addr,
2443                                                    GFP_KERNEL);
2444        if (!bp->hwrm_dbg_resp_addr)
2445                netdev_warn(bp->dev, "failed to allocate debug register DMA memory\n");
2446
2447        return 0;
2448}
2449
2450static void bnxt_free_stats(struct bnxt *bp)
2451{
2452        u32 size, i;
2453        struct pci_dev *pdev = bp->pdev;
2454
2455        if (bp->hw_rx_port_stats) {
2456                dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
2457                                  bp->hw_rx_port_stats,
2458                                  bp->hw_rx_port_stats_map);
2459                bp->hw_rx_port_stats = NULL;
2460                bp->flags &= ~BNXT_FLAG_PORT_STATS;
2461        }
2462
2463        if (!bp->bnapi)
2464                return;
2465
2466        size = sizeof(struct ctx_hw_stats);
2467
2468        for (i = 0; i < bp->cp_nr_rings; i++) {
2469                struct bnxt_napi *bnapi = bp->bnapi[i];
2470                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2471
2472                if (cpr->hw_stats) {
2473                        dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2474                                          cpr->hw_stats_map);
2475                        cpr->hw_stats = NULL;
2476                }
2477        }
2478}
2479
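/* Allocate one ctx_hw_stats block per completion ring and, on the PF, a
 * single coherent buffer holding the RX and TX port statistics.
 */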
2480static int bnxt_alloc_stats(struct bnxt *bp)
2481{
2482        u32 size, i;
2483        struct pci_dev *pdev = bp->pdev;
2484
2485        size = sizeof(struct ctx_hw_stats);
2486
2487        for (i = 0; i < bp->cp_nr_rings; i++) {
2488                struct bnxt_napi *bnapi = bp->bnapi[i];
2489                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2490
2491                cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2492                                                   &cpr->hw_stats_map,
2493                                                   GFP_KERNEL);
2494                if (!cpr->hw_stats)
2495                        return -ENOMEM;
2496
2497                cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2498        }
2499
2500        if (BNXT_PF(bp)) {
2501                bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
2502                                         sizeof(struct tx_port_stats) + 1024;
2503
2504                bp->hw_rx_port_stats =
2505                        dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
2506                                           &bp->hw_rx_port_stats_map,
2507                                           GFP_KERNEL);
2508                if (!bp->hw_rx_port_stats)
2509                        return -ENOMEM;
2510
2511                bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
2512                                       512;
2513                bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
2514                                           sizeof(struct rx_port_stats) + 512;
2515                bp->flags |= BNXT_FLAG_PORT_STATS;
2516        }
2517        return 0;
2518}
2519
2520static void bnxt_clear_ring_indices(struct bnxt *bp)
2521{
2522        int i;
2523
2524        if (!bp->bnapi)
2525                return;
2526
2527        for (i = 0; i < bp->cp_nr_rings; i++) {
2528                struct bnxt_napi *bnapi = bp->bnapi[i];
2529                struct bnxt_cp_ring_info *cpr;
2530                struct bnxt_rx_ring_info *rxr;
2531                struct bnxt_tx_ring_info *txr;
2532
2533                if (!bnapi)
2534                        continue;
2535
2536                cpr = &bnapi->cp_ring;
2537                cpr->cp_raw_cons = 0;
2538
2539                txr = bnapi->tx_ring;
2540                if (txr) {
2541                        txr->tx_prod = 0;
2542                        txr->tx_cons = 0;
2543                }
2544
2545                rxr = bnapi->rx_ring;
2546                if (rxr) {
2547                        rxr->rx_prod = 0;
2548                        rxr->rx_agg_prod = 0;
2549                        rxr->rx_sw_agg_prod = 0;
2550                        rxr->rx_next_cons = 0;
2551                }
2552        }
2553}
2554
2555static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2556{
2557#ifdef CONFIG_RFS_ACCEL
2558        int i;
2559
2560        /* Under rtnl_lock and all our NAPIs have been disabled.  It's
2561         * safe to delete the hash table.
2562         */
2563        for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2564                struct hlist_head *head;
2565                struct hlist_node *tmp;
2566                struct bnxt_ntuple_filter *fltr;
2567
2568                head = &bp->ntp_fltr_hash_tbl[i];
2569                hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2570                        hlist_del(&fltr->hash);
2571                        kfree(fltr);
2572                }
2573        }
2574        if (irq_reinit) {
2575                kfree(bp->ntp_fltr_bmap);
2576                bp->ntp_fltr_bmap = NULL;
2577        }
2578        bp->ntp_fltr_count = 0;
2579#endif
2580}
2581
2582static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2583{
2584#ifdef CONFIG_RFS_ACCEL
2585        int i, rc = 0;
2586
2587        if (!(bp->flags & BNXT_FLAG_RFS))
2588                return 0;
2589
2590        for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2591                INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2592
2593        bp->ntp_fltr_count = 0;
2594        bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
2595                                    sizeof(long), GFP_KERNEL);
2596
2597        if (!bp->ntp_fltr_bmap)
2598                rc = -ENOMEM;
2599
2600        return rc;
2601#else
2602        return 0;
2603#endif
2604}
2605
2606static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2607{
2608        bnxt_free_vnic_attributes(bp);
2609        bnxt_free_tx_rings(bp);
2610        bnxt_free_rx_rings(bp);
2611        bnxt_free_cp_rings(bp);
2612        bnxt_free_ntp_fltrs(bp, irq_re_init);
2613        if (irq_re_init) {
2614                bnxt_free_stats(bp);
2615                bnxt_free_ring_grps(bp);
2616                bnxt_free_vnics(bp);
2617                kfree(bp->tx_ring);
2618                bp->tx_ring = NULL;
2619                kfree(bp->rx_ring);
2620                bp->rx_ring = NULL;
2621                kfree(bp->bnapi);
2622                bp->bnapi = NULL;
2623        } else {
2624                bnxt_clear_ring_indices(bp);
2625        }
2626}
2627
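/* Allocate all software state for the current ring configuration.  The
 * bnapi block is laid out as a pointer array followed by the per-ring
 * bnxt_napi structs; RX rings always map to bnapi[0..rx_nr_rings-1], while
 * TX rings share those bnapi entries when BNXT_FLAG_SHARED_RINGS is set and
 * otherwise start at bnapi[rx_nr_rings].
 */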
2628static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2629{
2630        int i, j, rc, size, arr_size;
2631        void *bnapi;
2632
2633        if (irq_re_init) {
2634                /* Allocate bnapi mem pointer array and mem block for
2635                 * all queues
2636                 */
2637                arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
2638                                bp->cp_nr_rings);
2639                size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2640                bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2641                if (!bnapi)
2642                        return -ENOMEM;
2643
2644                bp->bnapi = bnapi;
2645                bnapi += arr_size;
2646                for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2647                        bp->bnapi[i] = bnapi;
2648                        bp->bnapi[i]->index = i;
2649                        bp->bnapi[i]->bp = bp;
2650                }
2651
2652                bp->rx_ring = kcalloc(bp->rx_nr_rings,
2653                                      sizeof(struct bnxt_rx_ring_info),
2654                                      GFP_KERNEL);
2655                if (!bp->rx_ring)
2656                        return -ENOMEM;
2657
2658                for (i = 0; i < bp->rx_nr_rings; i++) {
2659                        bp->rx_ring[i].bnapi = bp->bnapi[i];
2660                        bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
2661                }
2662
2663                bp->tx_ring = kcalloc(bp->tx_nr_rings,
2664                                      sizeof(struct bnxt_tx_ring_info),
2665                                      GFP_KERNEL);
2666                if (!bp->tx_ring)
2667                        return -ENOMEM;
2668
2669                if (bp->flags & BNXT_FLAG_SHARED_RINGS)
2670                        j = 0;
2671                else
2672                        j = bp->rx_nr_rings;
2673
2674                for (i = 0; i < bp->tx_nr_rings; i++, j++) {
2675                        bp->tx_ring[i].bnapi = bp->bnapi[j];
2676                        bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
2677                }
2678
2679                rc = bnxt_alloc_stats(bp);
2680                if (rc)
2681                        goto alloc_mem_err;
2682
2683                rc = bnxt_alloc_ntp_fltrs(bp);
2684                if (rc)
2685                        goto alloc_mem_err;
2686
2687                rc = bnxt_alloc_vnics(bp);
2688                if (rc)
2689                        goto alloc_mem_err;
2690        }
2691
2692        bnxt_init_ring_struct(bp);
2693
2694        rc = bnxt_alloc_rx_rings(bp);
2695        if (rc)
2696                goto alloc_mem_err;
2697
2698        rc = bnxt_alloc_tx_rings(bp);
2699        if (rc)
2700                goto alloc_mem_err;
2701
2702        rc = bnxt_alloc_cp_rings(bp);
2703        if (rc)
2704                goto alloc_mem_err;
2705
2706        bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
2707                                  BNXT_VNIC_UCAST_FLAG;
2708        rc = bnxt_alloc_vnic_attributes(bp);
2709        if (rc)
2710                goto alloc_mem_err;
2711        return 0;
2712
2713alloc_mem_err:
2714        bnxt_free_mem(bp, true);
2715        return rc;
2716}
2717
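/* Fill in the fields common to every HWRM request: request type, target
 * completion ring, target function id, and the DMA address of the response
 * buffer.
 */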
2718void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
2719                            u16 cmpl_ring, u16 target_id)
2720{
2721        struct input *req = request;
2722
2723        req->req_type = cpu_to_le16(req_type);
2724        req->cmpl_ring = cpu_to_le16(cmpl_ring);
2725        req->target_id = cpu_to_le16(target_id);
2726        req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
2727}
2728
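/* Send one HWRM request and wait for its response.  The request is copied
 * into the BAR0 communication window, the rest of the window is zeroed, and
 * the channel doorbell at offset 0x100 is rung.  Completion is detected
 * either through the HWRM completion interrupt (hwrm_intr_seq_id is cleared
 * by bnxt_hwrm_handler()) or by polling the response length and valid bit.
 * A typical caller goes through hwrm_send_message(), e.g.
 * bnxt_hwrm_func_drv_unrgtr() below:
 *
 *	struct hwrm_func_drv_unrgtr_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
 *	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 */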
2729static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
2730                                 int timeout, bool silent)
2731{
2732        int i, intr_process, rc;
2733        struct input *req = msg;
2734        u32 *data = msg;
2735        __le32 *resp_len, *valid;
2736        u16 cp_ring_id, len = 0;
2737        struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
2738
2739        req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
2740        memset(resp, 0, PAGE_SIZE);
2741        cp_ring_id = le16_to_cpu(req->cmpl_ring);
2742        intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
2743
2744        /* Write request msg to hwrm channel */
2745        __iowrite32_copy(bp->bar0, data, msg_len / 4);
2746
2747        for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
2748                writel(0, bp->bar0 + i);
2749
2750        /* currently supports only one outstanding message */
2751        if (intr_process)
2752                bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
2753
2754        /* Ring channel doorbell */
2755        writel(1, bp->bar0 + 0x100);
2756
2757        if (!timeout)
2758                timeout = DFLT_HWRM_CMD_TIMEOUT;
2759
2760        i = 0;
2761        if (intr_process) {
2762                /* Wait until hwrm response cmpl interrupt is processed */
2763                while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
2764                       i++ < timeout) {
2765                        usleep_range(600, 800);
2766                }
2767
2768                if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
2769                        netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
2770                                   le16_to_cpu(req->req_type));
2771                        return -1;
2772                }
2773        } else {
2774                /* Check if response len is updated */
2775                resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
2776                for (i = 0; i < timeout; i++) {
2777                        len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
2778                              HWRM_RESP_LEN_SFT;
2779                        if (len)
2780                                break;
2781                        usleep_range(600, 800);
2782                }
2783
2784                if (i >= timeout) {
2785                        netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
2786                                   timeout, le16_to_cpu(req->req_type),
2787                                   le16_to_cpu(req->seq_id), le32_to_cpu(*resp_len));
2788                        return -1;
2789                }
2790
2791                /* Last word of resp contains valid bit */
2792                valid = bp->hwrm_cmd_resp_addr + len - 4;
2793                for (i = 0; i < timeout; i++) {
2794                        if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
2795                                break;
2796                        usleep_range(600, 800);
2797                }
2798
2799                if (i >= timeout) {
2800                        netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
2801                                   timeout, le16_to_cpu(req->req_type),
2802                                   le16_to_cpu(req->seq_id), len, le32_to_cpu(*valid));
2803                        return -1;
2804                }
2805        }
2806
2807        rc = le16_to_cpu(resp->error_code);
2808        if (rc && !silent)
2809                netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
2810                           le16_to_cpu(resp->req_type),
2811                           le16_to_cpu(resp->seq_id), rc);
2812        return rc;
2813}
2814
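/* _hwrm_send_message() expects the caller to already hold hwrm_cmd_lock
 * (see bnxt_hwrm_tunnel_dst_port_alloc() below); hwrm_send_message() and
 * hwrm_send_message_silent() take the lock themselves, the latter
 * suppressing the error message on failure.
 */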
2815int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2816{
2817        return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
2818}
2819
2820int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2821{
2822        int rc;
2823
2824        mutex_lock(&bp->hwrm_cmd_lock);
2825        rc = _hwrm_send_message(bp, msg, msg_len, timeout);
2826        mutex_unlock(&bp->hwrm_cmd_lock);
2827        return rc;
2828}
2829
2830int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
2831                             int timeout)
2832{
2833        int rc;
2834
2835        mutex_lock(&bp->hwrm_cmd_lock);
2836        rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
2837        mutex_unlock(&bp->hwrm_cmd_lock);
2838        return rc;
2839}
2840
2841static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2842{
2843        struct hwrm_func_drv_rgtr_input req = {0};
2844        int i;
2845
2846        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
2847
2848        req.enables =
2849                cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
2850                            FUNC_DRV_RGTR_REQ_ENABLES_VER |
2851                            FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
2852
2853        /* TODO: current async event fwd bits are not defined and the firmware
2854         * only checks for a non-zero value to enable async event forwarding
2855         */
2856        req.async_event_fwd[0] |= cpu_to_le32(1);
2857        req.os_type = cpu_to_le16(1);
2858        req.ver_maj = DRV_VER_MAJ;
2859        req.ver_min = DRV_VER_MIN;
2860        req.ver_upd = DRV_VER_UPD;
2861
2862        if (BNXT_PF(bp)) {
2863                DECLARE_BITMAP(vf_req_snif_bmap, 256);
2864                u32 *data = (u32 *)vf_req_snif_bmap;
2865
2866                memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
2867                for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
2868                        __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
2869
2870                for (i = 0; i < 8; i++)
2871                        req.vf_req_fwd[i] = cpu_to_le32(data[i]);
2872
2873                req.enables |=
2874                        cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
2875        }
2876
2877        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2878}
2879
2880static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
2881{
2882        struct hwrm_func_drv_unrgtr_input req = {0};
2883
2884        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
2885        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2886}
2887
2888static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
2889{
2890        u32 rc = 0;
2891        struct hwrm_tunnel_dst_port_free_input req = {0};
2892
2893        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
2894        req.tunnel_type = tunnel_type;
2895
2896        switch (tunnel_type) {
2897        case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
2898                req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
2899                break;
2900        case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
2901                req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
2902                break;
2903        default:
2904                break;
2905        }
2906
2907        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2908        if (rc)
2909                netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
2910                           rc);
2911        return rc;
2912}
2913
2914static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
2915                                           u8 tunnel_type)
2916{
2917        u32 rc = 0;
2918        struct hwrm_tunnel_dst_port_alloc_input req = {0};
2919        struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2920
2921        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
2922
2923        req.tunnel_type = tunnel_type;
2924        req.tunnel_dst_port_val = port;
2925
2926        mutex_lock(&bp->hwrm_cmd_lock);
2927        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2928        if (rc) {
2929                netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
2930                           rc);
2931                goto err_out;
2932        }
2933
2934        if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
2935                bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2936
2937        else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
2938                bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
2939err_out:
2940        mutex_unlock(&bp->hwrm_cmd_lock);
2941        return rc;
2942}
2943
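    /* Push the rx mask of a vnic (broadcast/all-multicast/promiscuous bits
     * and the multicast address table) to the firmware.
     */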
2944static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
2945{
2946        struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2947        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2948
2949        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
2950        req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
2951
2952        req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
2953        req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
2954        req.mask = cpu_to_le32(vnic->rx_mask);
2955        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2956}
2957
2958#ifdef CONFIG_RFS_ACCEL
2959static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
2960                                            struct bnxt_ntuple_filter *fltr)
2961{
2962        struct hwrm_cfa_ntuple_filter_free_input req = {0};
2963
2964        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
2965        req.ntuple_filter_id = fltr->filter_id;
2966        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2967}
2968
2969#define BNXT_NTP_FLTR_FLAGS                                     \
2970        (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
2971         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
2972         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
2973         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
2974         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
2975         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
2976         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
2977         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
2978         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
2979         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
2980         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
2981         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
2982         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
2983         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
2984
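    /* Program an aRFS ntuple filter: an exact-match IPv4 4-tuple hanging
     * off the default L2 filter and steered to the vnic of ring (rxq + 1).
     */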
2985static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
2986                                             struct bnxt_ntuple_filter *fltr)
2987{
2988        int rc = 0;
2989        struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
2990        struct hwrm_cfa_ntuple_filter_alloc_output *resp =
2991                bp->hwrm_cmd_resp_addr;
2992        struct flow_keys *keys = &fltr->fkeys;
2993        struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
2994
2995        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
2996        req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
2997
2998        req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
2999
3000        req.ethertype = htons(ETH_P_IP);
3001        memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
3002        req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
3003        req.ip_protocol = keys->basic.ip_proto;
3004
3005        req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3006        req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3007        req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3008        req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3009
3010        req.src_port = keys->ports.src;
3011        req.src_port_mask = cpu_to_be16(0xffff);
3012        req.dst_port = keys->ports.dst;
3013        req.dst_port_mask = cpu_to_be16(0xffff);
3014
3015        req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
3016        mutex_lock(&bp->hwrm_cmd_lock);
3017        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3018        if (!rc)
3019                fltr->filter_id = resp->ntuple_filter_id;
3020        mutex_unlock(&bp->hwrm_cmd_lock);
3021        return rc;
3022}
3023#endif
3024
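    /* Allocate an L2 (unicast MAC) rx filter on a vnic with a full 6-byte
     * address mask and cache the firmware filter id for a later free.
     */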
3025static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3026                                     u8 *mac_addr)
3027{
3028        u32 rc = 0;
3029        struct hwrm_cfa_l2_filter_alloc_input req = {0};
3030        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3031
3032        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
3033        req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
3034                                CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
3035        req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
3036        req.enables =
3037                cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
3038                            CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
3039                            CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3040        memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3041        req.l2_addr_mask[0] = 0xff;
3042        req.l2_addr_mask[1] = 0xff;
3043        req.l2_addr_mask[2] = 0xff;
3044        req.l2_addr_mask[3] = 0xff;
3045        req.l2_addr_mask[4] = 0xff;
3046        req.l2_addr_mask[5] = 0xff;
3047
3048        mutex_lock(&bp->hwrm_cmd_lock);
3049        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3050        if (!rc)
3051                bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3052                                                        resp->l2_filter_id;
3053        mutex_unlock(&bp->hwrm_cmd_lock);
3054        return rc;
3055}
3056
3057static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3058{
3059        u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3060        int rc = 0;
3061
3062        /* Any associated ntuple filters will also be cleared by firmware. */
3063        mutex_lock(&bp->hwrm_cmd_lock);
3064        for (i = 0; i < num_of_vnics; i++) {
3065                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3066
3067                for (j = 0; j < vnic->uc_filter_count; j++) {
3068                        struct hwrm_cfa_l2_filter_free_input req = {0};
3069
3070                        bnxt_hwrm_cmd_hdr_init(bp, &req,
3071                                               HWRM_CFA_L2_FILTER_FREE, -1, -1);
3072
3073                        req.l2_filter_id = vnic->fw_l2_filter_id[j];
3074
3075                        rc = _hwrm_send_message(bp, &req, sizeof(req),
3076                                                HWRM_CMD_TIMEOUT);
3077                }
3078                vnic->uc_filter_count = 0;
3079        }
3080        mutex_unlock(&bp->hwrm_cmd_lock);
3081
3082        return rc;
3083}
3084
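    /* Enable or disable TPA (hardware receive aggregation) on a vnic.  The
     * maximum number of aggregation segments is derived from the MTU-based
     * MSS and BNXT_RX_PAGE_SIZE and is passed to firmware in log2 units.
     */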
3085static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3086{
3087        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3088        struct hwrm_vnic_tpa_cfg_input req = {0};
3089
3090        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3091
3092        if (tpa_flags) {
3093                u16 mss = bp->dev->mtu - 40;
3094                u32 nsegs, n, segs = 0, flags;
3095
3096                flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3097                        VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3098                        VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3099                        VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3100                        VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3101                if (tpa_flags & BNXT_FLAG_GRO)
3102                        flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3103
3104                req.flags = cpu_to_le32(flags);
3105
3106                req.enables =
3107                        cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
3108                                    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3109                                    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
3110
3111                /* The number of aggregation segments is in log2 units,
3112                 * and the first packet is not counted in these units.
3113                 */
3114                if (mss <= BNXT_RX_PAGE_SIZE) {
3115                        n = BNXT_RX_PAGE_SIZE / mss;
3116                        nsegs = (MAX_SKB_FRAGS - 1) * n;
3117                } else {
3118                        n = mss / BNXT_RX_PAGE_SIZE;
3119                        if (mss & (BNXT_RX_PAGE_SIZE - 1))
3120                                n++;
3121                        nsegs = (MAX_SKB_FRAGS - n) / n;
3122                }
3123
3124                segs = ilog2(nsegs);
3125                req.max_agg_segs = cpu_to_le16(segs);
3126                req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
3127
3128                req.min_agg_len = cpu_to_le32(512);
3129        }
3130        req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3131
3132        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3133}
3134
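    /* Program the RSS hash types for a vnic and fill its indirection table
     * by cycling round-robin over the vnic's ring group ids.
     */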
3135static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3136{
3137        u32 i, j, max_rings;
3138        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3139        struct hwrm_vnic_rss_cfg_input req = {0};
3140
3141        if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
3142                return 0;
3143
3144        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3145        if (set_rss) {
3146                vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
3147                                 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
3148                                 BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
3149                                 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
3150
3151                req.hash_type = cpu_to_le32(vnic->hash_type);
3152
3153                if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3154                        max_rings = bp->rx_nr_rings;
3155                else
3156                        max_rings = 1;
3157
3158                /* Fill the RSS indirection table with ring group ids */
3159                for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3160                        if (j == max_rings)
3161                                j = 0;
3162                        vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3163                }
3164
3165                req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3166                req.hash_key_tbl_addr =
3167                        cpu_to_le64(vnic->rss_hash_key_dma_addr);
3168        }
3169        req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3170        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3171}
3172
3173static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3174{
3175        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3176        struct hwrm_vnic_plcmodes_cfg_input req = {0};
3177
3178        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3179        req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3180                                VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3181                                VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3182        req.enables =
3183                cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3184                            VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3185        /* thresholds not implemented in firmware yet */
3186        req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3187        req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3188        req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3189        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3190}
3191
3192static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
3193{
3194        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3195
3196        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3197        req.rss_cos_lb_ctx_id =
3198                cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
3199
3200        hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3201        bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3202}
3203
3204static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3205{
3206        int i;
3207
3208        for (i = 0; i < bp->nr_vnics; i++) {
3209                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3210
3211                if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
3212                        bnxt_hwrm_vnic_ctx_free_one(bp, i);
3213        }
3214        bp->rsscos_nr_ctxs = 0;
3215}
3216
3217static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
3218{
3219        int rc;
3220        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3221        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3222                                                bp->hwrm_cmd_resp_addr;
3223
3224        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3225                               -1);
3226
3227        mutex_lock(&bp->hwrm_cmd_lock);
3228        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3229        if (!rc)
3230                bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
3231                        le16_to_cpu(resp->rss_cos_lb_ctx_id);
3232        mutex_unlock(&bp->hwrm_cmd_lock);
3233
3234        return rc;
3235}
3236
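    /* Configure a vnic: default ring group, RSS context, MRU and optional
     * VLAN stripping.  The COS and load-balancing rules are left unset.
     */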
3237static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3238{
3239        unsigned int ring = 0, grp_idx;
3240        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3241        struct hwrm_vnic_cfg_input req = {0};
3242
3243        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3244        /* Only RSS support for now.  TBD: COS & LB */
3245        req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
3246                                  VNIC_CFG_REQ_ENABLES_RSS_RULE);
3247        req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3248        req.cos_rule = cpu_to_le16(0xffff);
3249        if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3250                ring = 0;
3251        else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3252                ring = vnic_id - 1;
3253
3254        grp_idx = bp->rx_ring[ring].bnapi->index;
3255        req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3256        req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3257
3258        req.lb_rule = cpu_to_le16(0xffff);
3259        req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3260                              VLAN_HLEN);
3261
3262        if (bp->flags & BNXT_FLAG_STRIP_VLAN)
3263                req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3264
3265        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3266}
3267
3268static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3269{
3270        u32 rc = 0;
3271
3272        if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3273                struct hwrm_vnic_free_input req = {0};
3274
3275                bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3276                req.vnic_id =
3277                        cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3278
3279                rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3280                if (rc)
3281                        return rc;
3282                bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3283        }
3284        return rc;
3285}
3286
3287static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3288{
3289        u16 i;
3290
3291        for (i = 0; i < bp->nr_vnics; i++)
3292                bnxt_hwrm_vnic_free_one(bp, i);
3293}
3294
3295static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
3296                                unsigned int start_rx_ring_idx,
3297                                unsigned int nr_rings)
3298{
3299        int rc = 0;
3300        unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
3301        struct hwrm_vnic_alloc_input req = {0};
3302        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3303
3304        /* map ring groups to this vnic */
3305        for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
3306                grp_idx = bp->rx_ring[i].bnapi->index;
3307                if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
3308                        netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3309                                   j, nr_rings);
3310                        break;
3311                }
3312                bp->vnic_info[vnic_id].fw_grp_ids[j] =
3313                                        bp->grp_info[grp_idx].fw_grp_id;
3314        }
3315
3316        bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3317        if (vnic_id == 0)
3318                req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3319
3320        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3321
3322        mutex_lock(&bp->hwrm_cmd_lock);
3323        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3324        if (!rc)
3325                bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3326        mutex_unlock(&bp->hwrm_cmd_lock);
3327        return rc;
3328}
3329
3330static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3331{
3332        u16 i;
3333        u32 rc = 0;
3334
3335        mutex_lock(&bp->hwrm_cmd_lock);
3336        for (i = 0; i < bp->rx_nr_rings; i++) {
3337                struct hwrm_ring_grp_alloc_input req = {0};
3338                struct hwrm_ring_grp_alloc_output *resp =
3339                                        bp->hwrm_cmd_resp_addr;
3340                unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
3341
3342                bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3343
3344                req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
3345                req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
3346                req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
3347                req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
3348
3349                rc = _hwrm_send_message(bp, &req, sizeof(req),
3350                                        HWRM_CMD_TIMEOUT);
3351                if (rc)
3352                        break;
3353
3354                bp->grp_info[grp_idx].fw_grp_id =
3355                        le32_to_cpu(resp->ring_group_id);
3356        }
3357        mutex_unlock(&bp->hwrm_cmd_lock);
3358        return rc;
3359}
3360
3361static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3362{
3363        u16 i;
3364        u32 rc = 0;
3365        struct hwrm_ring_grp_free_input req = {0};
3366
3367        if (!bp->grp_info)
3368                return 0;
3369
3370        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
3371
3372        mutex_lock(&bp->hwrm_cmd_lock);
3373        for (i = 0; i < bp->cp_nr_rings; i++) {
3374                if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
3375                        continue;
3376                req.ring_group_id =
3377                        cpu_to_le32(bp->grp_info[i].fw_grp_id);
3378
3379                rc = _hwrm_send_message(bp, &req, sizeof(req),
3380                                        HWRM_CMD_TIMEOUT);
3381                if (rc)
3382                        break;
3383                bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3384        }
3385        mutex_unlock(&bp->hwrm_cmd_lock);
3386        return rc;
3387}
3388
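    /* Send HWRM_RING_ALLOC for one tx, rx, aggregation or completion ring
     * and record the firmware-assigned ring id on success.
     */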
3389static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
3390                                    struct bnxt_ring_struct *ring,
3391                                    u32 ring_type, u32 map_index,
3392                                    u32 stats_ctx_id)
3393{
3394        int rc = 0, err = 0;
3395        struct hwrm_ring_alloc_input req = {0};
3396        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3397        u16 ring_id;
3398
3399        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
3400
3401        req.enables = 0;
3402        if (ring->nr_pages > 1) {
3403                req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
3404                /* Page size is in log2 units */
3405                req.page_size = BNXT_PAGE_SHIFT;
3406                req.page_tbl_depth = 1;
3407        } else {
3408                req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
3409        }
3410        req.fbo = 0;
3411        /* Association of ring index with doorbell index and MSIX number */
3412        req.logical_id = cpu_to_le16(map_index);
3413
3414        switch (ring_type) {
3415        case HWRM_RING_ALLOC_TX:
3416                req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
3417                /* Association of transmit ring with completion ring */
3418                req.cmpl_ring_id =
3419                        cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
3420                req.length = cpu_to_le32(bp->tx_ring_mask + 1);
3421                req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
3422                req.queue_id = cpu_to_le16(ring->queue_id);
3423                break;
3424        case HWRM_RING_ALLOC_RX:
3425                req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3426                req.length = cpu_to_le32(bp->rx_ring_mask + 1);
3427                break;
3428        case HWRM_RING_ALLOC_AGG:
3429                req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3430                req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
3431                break;
3432        case HWRM_RING_ALLOC_CMPL:
3433                req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
3434                req.length = cpu_to_le32(bp->cp_ring_mask + 1);
3435                if (bp->flags & BNXT_FLAG_USING_MSIX)
3436                        req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
3437                break;
3438        default:
3439                netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
3440                           ring_type);
3441                return -1;
3442        }
3443
3444        mutex_lock(&bp->hwrm_cmd_lock);
3445        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3446        err = le16_to_cpu(resp->error_code);
3447        ring_id = le16_to_cpu(resp->ring_id);
3448        mutex_unlock(&bp->hwrm_cmd_lock);
3449
3450        if (rc || err) {
3451                switch (ring_type) {
3452                case RING_FREE_REQ_RING_TYPE_CMPL:
3453                        netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
3454                                   rc, err);
3455                        return -1;
3456
3457                case RING_FREE_REQ_RING_TYPE_RX:
3458                        netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
3459                                   rc, err);
3460                        return -1;
3461
3462                case RING_FREE_REQ_RING_TYPE_TX:
3463                        netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
3464                                   rc, err);
3465                        return -1;
3466
3467                default:
3468                        netdev_err(bp->dev, "Invalid ring\n");
3469                        return -1;
3470                }
3471        }
3472        ring->fw_ring_id = ring_id;
3473        return rc;
3474}
3475
3476static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3477{
3478        int i, rc = 0;
3479
3480        for (i = 0; i < bp->cp_nr_rings; i++) {
3481                struct bnxt_napi *bnapi = bp->bnapi[i];
3482                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3483                struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3484
3485                cpr->cp_doorbell = bp->bar1 + i * 0x80;
3486                rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3487                                              INVALID_STATS_CTX_ID);
3488                if (rc)
3489                        goto err_out;
3490                BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3491                bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
3492        }
3493
3494        for (i = 0; i < bp->tx_nr_rings; i++) {
3495                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3496                struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3497                u32 map_idx = txr->bnapi->index;
3498                u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
3499
3500                rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
3501                                              map_idx, fw_stats_ctx);
3502                if (rc)
3503                        goto err_out;
3504                txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
3505        }
3506
3507        for (i = 0; i < bp->rx_nr_rings; i++) {
3508                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3509                struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3510                u32 map_idx = rxr->bnapi->index;
3511
3512                rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
3513                                              map_idx, INVALID_STATS_CTX_ID);
3514                if (rc)
3515                        goto err_out;
3516                rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
3517                writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
3518                bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3519        }
3520
3521        if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3522                for (i = 0; i < bp->rx_nr_rings; i++) {
3523                        struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3524                        struct bnxt_ring_struct *ring =
3525                                                &rxr->rx_agg_ring_struct;
3526                        u32 grp_idx = rxr->bnapi->index;
3527                        u32 map_idx = grp_idx + bp->rx_nr_rings;
3528
3529                        rc = hwrm_ring_alloc_send_msg(bp, ring,
3530                                                      HWRM_RING_ALLOC_AGG,
3531                                                      map_idx,
3532                                                      INVALID_STATS_CTX_ID);
3533                        if (rc)
3534                                goto err_out;
3535
3536                        rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
3537                        writel(DB_KEY_RX | rxr->rx_agg_prod,
3538                               rxr->rx_agg_doorbell);
3539                        bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
3540                }
3541        }
3542err_out:
3543        return rc;
3544}
3545
3546static int hwrm_ring_free_send_msg(struct bnxt *bp,
3547                                   struct bnxt_ring_struct *ring,
3548                                   u32 ring_type, int cmpl_ring_id)
3549{
3550        int rc;
3551        struct hwrm_ring_free_input req = {0};
3552        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3553        u16 error_code;
3554
3555        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
3556        req.ring_type = ring_type;
3557        req.ring_id = cpu_to_le16(ring->fw_ring_id);
3558
3559        mutex_lock(&bp->hwrm_cmd_lock);
3560        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3561        error_code = le16_to_cpu(resp->error_code);
3562        mutex_unlock(&bp->hwrm_cmd_lock);
3563
3564        if (rc || error_code) {
3565                switch (ring_type) {
3566                case RING_FREE_REQ_RING_TYPE_CMPL:
3567                        netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
3568                                   rc);
3569                        return rc;
3570                case RING_FREE_REQ_RING_TYPE_RX:
3571                        netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
3572                                   rc);
3573                        return rc;
3574                case RING_FREE_REQ_RING_TYPE_TX:
3575                        netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
3576                                   rc);
3577                        return rc;
3578                default:
3579                        netdev_err(bp->dev, "Invalid ring\n");
3580                        return -1;
3581                }
3582        }
3583        return 0;
3584}
3585
3586static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
3587{
3588        int i;
3589
3590        if (!bp->bnapi)
3591                return;
3592
3593        for (i = 0; i < bp->tx_nr_rings; i++) {
3594                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3595                struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3596                u32 grp_idx = txr->bnapi->index;
3597                u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
3598
3599                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3600                        hwrm_ring_free_send_msg(bp, ring,
3601                                                RING_FREE_REQ_RING_TYPE_TX,
3602                                                close_path ? cmpl_ring_id :
3603                                                INVALID_HW_RING_ID);
3604                        ring->fw_ring_id = INVALID_HW_RING_ID;
3605                }
3606        }
3607
3608        for (i = 0; i < bp->rx_nr_rings; i++) {
3609                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3610                struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3611                u32 grp_idx = rxr->bnapi->index;
3612                u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
3613
3614                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3615                        hwrm_ring_free_send_msg(bp, ring,
3616                                                RING_FREE_REQ_RING_TYPE_RX,
3617                                                close_path ? cmpl_ring_id :
3618                                                INVALID_HW_RING_ID);
3619                        ring->fw_ring_id = INVALID_HW_RING_ID;
3620                        bp->grp_info[grp_idx].rx_fw_ring_id =
3621                                INVALID_HW_RING_ID;
3622                }
3623        }
3624
3625        for (i = 0; i < bp->rx_nr_rings; i++) {
3626                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3627                struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
3628                u32 grp_idx = rxr->bnapi->index;
3629                u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
3630
3631                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3632                        hwrm_ring_free_send_msg(bp, ring,
3633                                                RING_FREE_REQ_RING_TYPE_RX,
3634                                                close_path ? cmpl_ring_id :
3635                                                INVALID_HW_RING_ID);
3636                        ring->fw_ring_id = INVALID_HW_RING_ID;
3637                        bp->grp_info[grp_idx].agg_fw_ring_id =
3638                                INVALID_HW_RING_ID;
3639                }
3640        }
3641
3642        for (i = 0; i < bp->cp_nr_rings; i++) {
3643                struct bnxt_napi *bnapi = bp->bnapi[i];
3644                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3645                struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3646
3647                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3648                        hwrm_ring_free_send_msg(bp, ring,
3649                                                RING_FREE_REQ_RING_TYPE_CMPL,
3650                                                INVALID_HW_RING_ID);
3651                        ring->fw_ring_id = INVALID_HW_RING_ID;
3652                        bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3653                }
3654        }
3655}
3656
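    /* Pack interrupt coalescing parameters into an aggint_params request.
     * max_bufs and buf_tmrs carry the normal value in their low 16 bits
     * and the during-interrupt value in their high 16 bits.
     */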
3657static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
3658        u32 buf_tmrs, u16 flags,
3659        struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3660{
3661        req->flags = cpu_to_le16(flags);
3662        req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
3663        req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
3664        req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
3665        req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
3666        /* Minimum time between 2 interrupts set to buf_tmr x 2 */
3667        req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
3668        req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
3669        req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
3670}
3671
3672int bnxt_hwrm_set_coal(struct bnxt *bp)
3673{
3674        int i, rc = 0;
3675        struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
3676                                                           req_tx = {0}, *req;
3677        u16 max_buf, max_buf_irq;
3678        u16 buf_tmr, buf_tmr_irq;
3679        u32 flags;
3680
3681        bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
3682                               HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
3683        bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
3684                               HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
3685
3686        /* Each rx completion (2 records) should be DMAed immediately.
3687         * DMA 1/4 of the completion buffers at a time.
3688         */
3689        max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
3690        /* max_buf must not be zero */
3691        max_buf = clamp_t(u16, max_buf, 1, 63);
3692        max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
3693        buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
3694        /* buf timer set to 1/4 of interrupt timer */
3695        buf_tmr = max_t(u16, buf_tmr / 4, 1);
3696        buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
3697        buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
3698
3699        flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3700
3701        /* RING_IDLE generates more IRQs for lower latency.  Enable it only
3702         * if coal_ticks is less than 25 us.
3703         */
3704        if (bp->rx_coal_ticks < 25)
3705                flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
3706
3707        bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
3708                                  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
3709
3710        /* max_buf must not be zero */
3711        max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
3712        max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
3713        buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
3714        /* buf timer set to 1/4 of interrupt timer */
3715        buf_tmr = max_t(u16, buf_tmr / 4, 1);
3716        buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
3717        buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
3718
3719        flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3720        bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
3721                                  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
3722
3723        mutex_lock(&bp->hwrm_cmd_lock);
3724        for (i = 0; i < bp->cp_nr_rings; i++) {
3725                struct bnxt_napi *bnapi = bp->bnapi[i];
3726
3727                req = &req_rx;
3728                if (!bnapi->rx_ring)
3729                        req = &req_tx;
3730                req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3731
3732                rc = _hwrm_send_message(bp, req, sizeof(*req),
3733                                        HWRM_CMD_TIMEOUT);
3734                if (rc)
3735                        break;
3736        }
3737        mutex_unlock(&bp->hwrm_cmd_lock);
3738        return rc;
3739}
3740
3741static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
3742{
3743        int rc = 0, i;
3744        struct hwrm_stat_ctx_free_input req = {0};
3745
3746        if (!bp->bnapi)
3747                return 0;
3748
3749        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
3750
3751        mutex_lock(&bp->hwrm_cmd_lock);
3752        for (i = 0; i < bp->cp_nr_rings; i++) {
3753                struct bnxt_napi *bnapi = bp->bnapi[i];
3754                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3755
3756                if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
3757                        req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
3758
3759                        rc = _hwrm_send_message(bp, &req, sizeof(req),
3760                                                HWRM_CMD_TIMEOUT);
3761                        if (rc)
3762                                break;
3763
3764                        cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3765                }
3766        }
3767        mutex_unlock(&bp->hwrm_cmd_lock);
3768        return rc;
3769}
3770
3771static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
3772{
3773        int rc = 0, i;
3774        struct hwrm_stat_ctx_alloc_input req = {0};
3775        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3776
3777        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
3778
3779        req.update_period_ms = cpu_to_le32(1000);
3780
3781        mutex_lock(&bp->hwrm_cmd_lock);
3782        for (i = 0; i < bp->cp_nr_rings; i++) {
3783                struct bnxt_napi *bnapi = bp->bnapi[i];
3784                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3785
3786                req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
3787
3788                rc = _hwrm_send_message(bp, &req, sizeof(req),
3789                                        HWRM_CMD_TIMEOUT);
3790                if (rc)
3791                        break;
3792
3793                cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
3794
3795                bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
3796        }
3797        mutex_unlock(&bp->hwrm_cmd_lock);
3798        return rc;
3799}
3800
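    /* Query function capabilities and resource limits (rings, vnics, stat
     * and RSS contexts, VF counts) into the PF or VF info structure, and
     * pick up the permanent MAC address and the TX push capability.
     */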
3801int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3802{
3803        int rc = 0;
3804        struct hwrm_func_qcaps_input req = {0};
3805        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3806
3807        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
3808        req.fid = cpu_to_le16(0xffff);
3809
3810        mutex_lock(&bp->hwrm_cmd_lock);
3811        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3812        if (rc)
3813                goto hwrm_func_qcaps_exit;
3814
3815        if (BNXT_PF(bp)) {
3816                struct bnxt_pf_info *pf = &bp->pf;
3817
3818                pf->fw_fid = le16_to_cpu(resp->fid);
3819                pf->port_id = le16_to_cpu(resp->port_id);
3820                memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
3821                memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
3822                pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3823                pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3824                pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3825                pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3826                pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3827                if (!pf->max_hw_ring_grps)
3828                        pf->max_hw_ring_grps = pf->max_tx_rings;
3829                pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3830                pf->max_vnics = le16_to_cpu(resp->max_vnics);
3831                pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3832                pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
3833                pf->max_vfs = le16_to_cpu(resp->max_vfs);
3834                pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
3835                pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
3836                pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
3837                pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
3838                pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
3839                pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
3840        } else {
3841#ifdef CONFIG_BNXT_SRIOV
3842                struct bnxt_vf_info *vf = &bp->vf;
3843
3844                vf->fw_fid = le16_to_cpu(resp->fid);
3845                memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
3846                if (is_valid_ether_addr(vf->mac_addr))
3847                        /* overwrite netdev dev_addr with admin VF MAC */
3848                        memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
3849                else
3850                        random_ether_addr(bp->dev->dev_addr);
3851
3852                vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3853                vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3854                vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3855                vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3856                vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3857                if (!vf->max_hw_ring_grps)
3858                        vf->max_hw_ring_grps = vf->max_tx_rings;
3859                vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3860                vf->max_vnics = le16_to_cpu(resp->max_vnics);
3861                vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3862#endif
3863        }
3864
3865        bp->tx_push_thresh = 0;
3866        if (resp->flags &
3867            cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
3868                bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
3869
3870hwrm_func_qcaps_exit:
3871        mutex_unlock(&bp->hwrm_cmd_lock);
3872        return rc;
3873}
3874
3875static int bnxt_hwrm_func_reset(struct bnxt *bp)
3876{
3877        struct hwrm_func_reset_input req = {0};
3878
3879        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
3880        req.enables = 0;
3881
3882        return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
3883}
3884
3885static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
3886{
3887        int rc = 0;
3888        struct hwrm_queue_qportcfg_input req = {0};
3889        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
3890        u8 i, *qptr;
3891
3892        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
3893
3894        mutex_lock(&bp->hwrm_cmd_lock);
3895        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3896        if (rc)
3897                goto qportcfg_exit;
3898
3899        if (!resp->max_configurable_queues) {
3900                rc = -EINVAL;
3901                goto qportcfg_exit;
3902        }
3903        bp->max_tc = resp->max_configurable_queues;
3904        if (bp->max_tc > BNXT_MAX_QUEUE)
3905                bp->max_tc = BNXT_MAX_QUEUE;
3906
3907        qptr = &resp->queue_id0;
3908        for (i = 0; i < bp->max_tc; i++) {
3909                bp->q_info[i].queue_id = *qptr++;
3910                bp->q_info[i].queue_profile = *qptr++;
3911        }
3912
3913qportcfg_exit:
3914        mutex_unlock(&bp->hwrm_cmd_lock);
3915        return rc;
3916}
3917
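    /* Query the firmware and HWRM interface versions, warn if the HWRM
     * interface is older than 1.0.0, and adopt the firmware's default
     * command timeout and maximum request length.
     */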
3918static int bnxt_hwrm_ver_get(struct bnxt *bp)
3919{
3920        int rc;
3921        struct hwrm_ver_get_input req = {0};
3922        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
3923
3924        bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
3925        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
3926        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
3927        req.hwrm_intf_min = HWRM_VERSION_MINOR;
3928        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
3929        mutex_lock(&bp->hwrm_cmd_lock);
3930        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3931        if (rc)
3932                goto hwrm_ver_get_exit;
3933
3934        memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
3935
3936        if (resp->hwrm_intf_maj < 1) {
3937                netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
3938                            resp->hwrm_intf_maj, resp->hwrm_intf_min,
3939                            resp->hwrm_intf_upd);
3940                netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
3941        }
3942        snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
3943                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
3944                 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
3945
3946        bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
3947        if (!bp->hwrm_cmd_timeout)
3948                bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
3949
3950        if (resp->hwrm_intf_maj >= 1)
3951                bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
3952
3953hwrm_ver_get_exit:
3954        mutex_unlock(&bp->hwrm_cmd_lock);
3955        return rc;
3956}
3957
3958static int bnxt_hwrm_port_qstats(struct bnxt *bp)
3959{
3960        int rc;
3961        struct bnxt_pf_info *pf = &bp->pf;
3962        struct hwrm_port_qstats_input req = {0};
3963
3964        if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3965                return 0;
3966
3967        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
3968        req.port_id = cpu_to_le16(pf->port_id);
3969        req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
3970        req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
3971        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3972        return rc;
3973}
3974
3975static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
3976{
3977        if (bp->vxlan_port_cnt) {
3978                bnxt_hwrm_tunnel_dst_port_free(
3979                        bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
3980        }
3981        bp->vxlan_port_cnt = 0;
3982        if (bp->nge_port_cnt) {
3983                bnxt_hwrm_tunnel_dst_port_free(
3984                        bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
3985        }
3986        bp->nge_port_cnt = 0;
3987}
3988
3989static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
3990{
3991        int rc, i;
3992        u32 tpa_flags = 0;
3993
3994        if (set_tpa)
3995                tpa_flags = bp->flags & BNXT_FLAG_TPA;
3996        for (i = 0; i < bp->nr_vnics; i++) {
3997                rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
3998                if (rc) {
3999                        netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
4000                                   i, rc);
4001                        return rc;
4002                }
4003        }
4004        return 0;
4005}
4006
4007static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
4008{
4009        int i;
4010
4011        for (i = 0; i < bp->nr_vnics; i++)
4012                bnxt_hwrm_vnic_set_rss(bp, i, false);
4013}
4014
4015static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
4016                                    bool irq_re_init)
4017{
4018        if (bp->vnic_info) {
4019                bnxt_hwrm_clear_vnic_filter(bp);
4020                /* clear all RSS settings before freeing vnic ctx */
4021                bnxt_hwrm_clear_vnic_rss(bp);
4022                bnxt_hwrm_vnic_ctx_free(bp);
4023                /* before freeing the vnic, undo the vnic tpa settings */
4024                if (bp->flags & BNXT_FLAG_TPA)
4025                        bnxt_set_tpa(bp, false);
4026                bnxt_hwrm_vnic_free(bp);
4027        }
4028        bnxt_hwrm_ring_free(bp, close_path);
4029        bnxt_hwrm_ring_grp_free(bp);
4030        if (irq_re_init) {
4031                bnxt_hwrm_stat_ctx_free(bp);
4032                bnxt_hwrm_free_tunnel_ports(bp);
4033        }
4034}
4035
4036static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
4037{
4038        int rc;
4039
4040        /* allocate context for vnic */
4041        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
4042        if (rc) {
4043                netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4044                           vnic_id, rc);
4045                goto vnic_setup_err;
4046        }
4047        bp->rsscos_nr_ctxs++;
4048
4049        /* configure default vnic, ring grp */
4050        rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
4051        if (rc) {
4052                netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
4053                           vnic_id, rc);
4054                goto vnic_setup_err;
4055        }
4056
4057        /* Enable RSS hashing on vnic */
4058        rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
4059        if (rc) {
4060                netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
4061                           vnic_id, rc);
4062                goto vnic_setup_err;
4063        }
4064
4065        if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4066                rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
4067                if (rc) {
4068                        netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
4069                                   vnic_id, rc);
4070                }
4071        }
4072
4073vnic_setup_err:
4074        return rc;
4075}
4076
4077static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
4078{
4079#ifdef CONFIG_RFS_ACCEL
4080        int i, rc = 0;
4081
4082        for (i = 0; i < bp->rx_nr_rings; i++) {
4083                u16 vnic_id = i + 1;
4084                u16 ring_id = i;
4085
4086                if (vnic_id >= bp->nr_vnics)
4087                        break;
4088
4089                bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
4090                rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
4091                if (rc) {
4092                        netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4093                                   vnic_id, rc);
4094                        break;
4095                }
4096                rc = bnxt_setup_vnic(bp, vnic_id);
4097                if (rc)
4098                        break;
4099        }
4100        return rc;
4101#else
4102        return 0;
4103#endif
4104}
4105
4106static int bnxt_cfg_rx_mode(struct bnxt *);
4107static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
4108
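    /* Bring up the datapath via HWRM: stats contexts, rings and ring
     * groups, default vnic 0 (RSS, HDS, optional RFS vnics, TPA), the
     * default MAC filter and rx mask, and finally interrupt coalescing.
     */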
4109static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4110{
4111        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4112        int rc = 0;
4113
4114        if (irq_re_init) {
4115                rc = bnxt_hwrm_stat_ctx_alloc(bp);
4116                if (rc) {
4117                        netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
4118                                   rc);
4119                        goto err_out;
4120                }
4121        }
4122
4123        rc = bnxt_hwrm_ring_alloc(bp);
4124        if (rc) {
4125                netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
4126                goto err_out;
4127        }
4128
4129        rc = bnxt_hwrm_ring_grp_alloc(bp);
4130        if (rc) {
4131                netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
4132                goto err_out;
4133        }
4134
4135        /* default vnic 0 */
4136        rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
4137        if (rc) {
4138                netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
4139                goto err_out;
4140        }
4141
4142        rc = bnxt_setup_vnic(bp, 0);
4143        if (rc)
4144                goto err_out;
4145
4146        if (bp->flags & BNXT_FLAG_RFS) {
4147                rc = bnxt_alloc_rfs_vnics(bp);
4148                if (rc)
4149                        goto err_out;
4150        }
4151
4152        if (bp->flags & BNXT_FLAG_TPA) {
4153                rc = bnxt_set_tpa(bp, true);
4154                if (rc)
4155                        goto err_out;
4156        }
4157
4158        if (BNXT_VF(bp))
4159                bnxt_update_vf_mac(bp);
4160
4161        /* Filter for default vnic 0 */
4162        rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
4163        if (rc) {
4164                netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
4165                goto err_out;
4166        }
4167        vnic->uc_filter_count = 1;
4168
4169        vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
4170
4171        if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
4172                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4173
4174        if (bp->dev->flags & IFF_ALLMULTI) {
4175                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4176                vnic->mc_list_count = 0;
4177        } else {
4178                u32 mask = 0;
4179
4180                bnxt_mc_list_updated(bp, &mask);
4181                vnic->rx_mask |= mask;
4182        }
4183
4184        rc = bnxt_cfg_rx_mode(bp);
4185        if (rc)
4186                goto err_out;
4187
4188        rc = bnxt_hwrm_set_coal(bp);
4189        if (rc)
4190                netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
4191                            rc);
4192
4193        return 0;
4194
4195err_out:
4196        bnxt_hwrm_resource_free(bp, 0, true);
4197
4198        return rc;
4199}
4200
4201static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
4202{
4203        bnxt_hwrm_resource_free(bp, 1, irq_re_init);
4204        return 0;
4205}
4206
4207static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
4208{
4209        bnxt_init_rx_rings(bp);
4210        bnxt_init_tx_rings(bp);
4211        bnxt_init_ring_grps(bp, irq_re_init);
4212        bnxt_init_vnics(bp);
4213
4214        return bnxt_init_chip(bp, irq_re_init);
4215}
4216
4217static void bnxt_disable_int(struct bnxt *bp)
4218{
4219        int i;
4220
4221        if (!bp->bnapi)
4222                return;
4223
4224        for (i = 0; i < bp->cp_nr_rings; i++) {
4225                struct bnxt_napi *bnapi = bp->bnapi[i];
4226                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4227
4228                BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4229        }
4230}
4231
4232static void bnxt_enable_int(struct bnxt *bp)
4233{
4234        int i;
4235
4236        atomic_set(&bp->intr_sem, 0);
4237        for (i = 0; i < bp->cp_nr_rings; i++) {
4238                struct bnxt_napi *bnapi = bp->bnapi[i];
4239                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4240
4241                BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
4242        }
4243}
4244
4245static int bnxt_set_real_num_queues(struct bnxt *bp)
4246{
4247        int rc;
4248        struct net_device *dev = bp->dev;
4249
4250        rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4251        if (rc)
4252                return rc;
4253
4254        rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4255        if (rc)
4256                return rc;
4257
4258#ifdef CONFIG_RFS_ACCEL
4259        if (bp->flags & BNXT_FLAG_RFS)
4260                dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
4261#endif
4262
4263        return rc;
4264}
4265
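    /* Fit the requested rx and tx ring counts into "max" completion rings:
     * with shared completion rings each count is capped at max, otherwise
     * rx and tx are reduced until their sum fits.
     */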
4266static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
4267                           bool shared)
4268{
4269        int _rx = *rx, _tx = *tx;
4270
4271        if (shared) {
4272                *rx = min_t(int, _rx, max);
4273                *tx = min_t(int, _tx, max);
4274        } else {
4275                if (max < 2)
4276                        return -ENOMEM;
4277
4278                while (_rx + _tx > max) {
4279                        if (_rx > _tx && _rx > 1)
4280                                _rx--;
4281                        else if (_tx > 1)
4282                                _tx--;
4283                }
4284                *rx = _rx;
4285                *tx = _tx;
4286        }
4287        return 0;
4288}
4289
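    /* Enable MSI-X, trim the ring counts to the vectors actually granted,
     * spread tx rings across traffic classes if configured, and name one
     * vector per completion ring.
     */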
4290static int bnxt_setup_msix(struct bnxt *bp)
4291{
4292        struct msix_entry *msix_ent;
4293        struct net_device *dev = bp->dev;
4294        int i, total_vecs, rc = 0, min = 1;
4295        const int len = sizeof(bp->irq_tbl[0].name);
4296
4297        bp->flags &= ~BNXT_FLAG_USING_MSIX;
4298        total_vecs = bp->cp_nr_rings;
4299
4300        msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4301        if (!msix_ent)
4302                return -ENOMEM;
4303
4304        for (i = 0; i < total_vecs; i++) {
4305                msix_ent[i].entry = i;
4306                msix_ent[i].vector = 0;
4307        }
4308
4309        if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
4310                min = 2;
4311
4312        total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
4313        if (total_vecs < 0) {
4314                rc = -ENODEV;
4315                goto msix_setup_exit;
4316        }
4317
4318        bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4319        if (bp->irq_tbl) {
4320                int tcs;
4321
4322                /* Trim rings based upon num of vectors allocated */
4323                rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
4324                                     total_vecs, min == 1);
4325                if (rc)
4326                        goto msix_setup_exit;
4327
4328                bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4329                tcs = netdev_get_num_tc(dev);
4330                if (tcs > 1) {
4331                        bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4332                        if (bp->tx_nr_rings_per_tc == 0) {
4333                                netdev_reset_tc(dev);
4334                                bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4335                        } else {
4336                                int i, off, count;
4337
4338                                bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4339                                for (i = 0; i < tcs; i++) {
4340                                        count = bp->tx_nr_rings_per_tc;
4341                                        off = i * count;
4342                                        netdev_set_tc_queue(dev, i, count, off);
4343                                }
4344                        }
4345                }
4346                bp->cp_nr_rings = total_vecs;
4347
4348                for (i = 0; i < bp->cp_nr_rings; i++) {
4349                        char *attr;
4350
4351                        bp->irq_tbl[i].vector = msix_ent[i].vector;
4352                        if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4353                                attr = "TxRx";
4354                        else if (i < bp->rx_nr_rings)
4355                                attr = "rx";
4356                        else
4357                                attr = "tx";
4358
4359                        snprintf(bp->irq_tbl[i].name, len,
4360                                 "%s-%s-%d", dev->name, attr, i);
4361                        bp->irq_tbl[i].handler = bnxt_msix;
4362                }
4363                rc = bnxt_set_real_num_queues(bp);
4364                if (rc)
4365                        goto msix_setup_exit;
4366        } else {
4367                rc = -ENOMEM;
4368                goto msix_setup_exit;
4369        }
4370        bp->flags |= BNXT_FLAG_USING_MSIX;
4371        kfree(msix_ent);
4372        return 0;
4373
4374msix_setup_exit:
4375        netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
4376        pci_disable_msix(bp->pdev);
4377        kfree(msix_ent);
4378        return rc;
4379}
4380
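/* Fall back to legacy INTA with a single shared TxRx ring. */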
4381static int bnxt_setup_inta(struct bnxt *bp)
4382{
4383        int rc;
4384        const int len = sizeof(bp->irq_tbl[0].name);
4385
4386        if (netdev_get_num_tc(bp->dev))
4387                netdev_reset_tc(bp->dev);
4388
4389        bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
4390        if (!bp->irq_tbl) {
4391                rc = -ENOMEM;
4392                return rc;
4393        }
4394        bp->rx_nr_rings = 1;
4395        bp->tx_nr_rings = 1;
4396        bp->cp_nr_rings = 1;
4397        bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4398        bp->flags |= BNXT_FLAG_SHARED_RINGS;
4399        bp->irq_tbl[0].vector = bp->pdev->irq;
4400        snprintf(bp->irq_tbl[0].name, len,
4401                 "%s-%s-%d", bp->dev->name, "TxRx", 0);
4402        bp->irq_tbl[0].handler = bnxt_inta;
4403        rc = bnxt_set_real_num_queues(bp);
4404        return rc;
4405}
4406
4407static int bnxt_setup_int_mode(struct bnxt *bp)
4408{
4409        int rc = 0;
4410
4411        if (bp->flags & BNXT_FLAG_MSIX_CAP)
4412                rc = bnxt_setup_msix(bp);
4413
4414        if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
4415                /* fallback to INTA */
4416                rc = bnxt_setup_inta(bp);
4417        }
4418        return rc;
4419}
4420
4421static void bnxt_free_irq(struct bnxt *bp)
4422{
4423        struct bnxt_irq *irq;
4424        int i;
4425
4426#ifdef CONFIG_RFS_ACCEL
4427        free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
4428        bp->dev->rx_cpu_rmap = NULL;
4429#endif
4430        if (!bp->irq_tbl)
4431                return;
4432
4433        for (i = 0; i < bp->cp_nr_rings; i++) {
4434                irq = &bp->irq_tbl[i];
4435                if (irq->requested)
4436                        free_irq(irq->vector, bp->bnapi[i]);
4437                irq->requested = 0;
4438        }
4439        if (bp->flags & BNXT_FLAG_USING_MSIX)
4440                pci_disable_msix(bp->pdev);
4441        kfree(bp->irq_tbl);
4442        bp->irq_tbl = NULL;
4443}
4444
4445static int bnxt_request_irq(struct bnxt *bp)
4446{
4447        int i, j, rc = 0;
4448        unsigned long flags = 0;
4449#ifdef CONFIG_RFS_ACCEL
4450        struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
4451#endif
4452
4453        if (!(bp->flags & BNXT_FLAG_USING_MSIX))
4454                flags = IRQF_SHARED;
4455
4456        for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4457                struct bnxt_irq *irq = &bp->irq_tbl[i];
4458#ifdef CONFIG_RFS_ACCEL
4459                if (rmap && bp->bnapi[i]->rx_ring) {
4460                        rc = irq_cpu_rmap_add(rmap, irq->vector);
4461                        if (rc)
4462                                netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
4463                                            j);
4464                        j++;
4465                }
4466#endif
4467                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
4468                                 bp->bnapi[i]);
4469                if (rc)
4470                        break;
4471
4472                irq->requested = 1;
4473        }
4474        return rc;
4475}
4476
4477static void bnxt_del_napi(struct bnxt *bp)
4478{
4479        int i;
4480
4481        if (!bp->bnapi)
4482                return;
4483
4484        for (i = 0; i < bp->cp_nr_rings; i++) {
4485                struct bnxt_napi *bnapi = bp->bnapi[i];
4486
4487                napi_hash_del(&bnapi->napi);
4488                netif_napi_del(&bnapi->napi);
4489        }
4490}
4491
4492static void bnxt_init_napi(struct bnxt *bp)
4493{
4494        int i;
4495        struct bnxt_napi *bnapi;
4496
4497        if (bp->flags & BNXT_FLAG_USING_MSIX) {
4498                for (i = 0; i < bp->cp_nr_rings; i++) {
4499                        bnapi = bp->bnapi[i];
4500                        netif_napi_add(bp->dev, &bnapi->napi,
4501                                       bnxt_poll, 64);
4502                }
4503        } else {
4504                bnapi = bp->bnapi[0];
4505                netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
4506        }
4507}
4508
4509static void bnxt_disable_napi(struct bnxt *bp)
4510{
4511        int i;
4512
4513        if (!bp->bnapi)
4514                return;
4515
4516        for (i = 0; i < bp->cp_nr_rings; i++) {
4517                napi_disable(&bp->bnapi[i]->napi);
4518                bnxt_disable_poll(bp->bnapi[i]);
4519        }
4520}
4521
4522static void bnxt_enable_napi(struct bnxt *bp)
4523{
4524        int i;
4525
4526        for (i = 0; i < bp->cp_nr_rings; i++) {
4527                bp->bnapi[i]->in_reset = false;
4528                bnxt_enable_poll(bp->bnapi[i]);
4529                napi_enable(&bp->bnapi[i]->napi);
4530        }
4531}
4532
4533static void bnxt_tx_disable(struct bnxt *bp)
4534{
4535        int i;
4536        struct bnxt_tx_ring_info *txr;
4537        struct netdev_queue *txq;
4538
4539        if (bp->tx_ring) {
4540                for (i = 0; i < bp->tx_nr_rings; i++) {
4541                        txr = &bp->tx_ring[i];
4542                        txq = netdev_get_tx_queue(bp->dev, i);
4543                        __netif_tx_lock(txq, smp_processor_id());
4544                        txr->dev_state = BNXT_DEV_STATE_CLOSING;
4545                        __netif_tx_unlock(txq);
4546                }
4547        }
4548        /* Stop all TX queues */
4549        netif_tx_disable(bp->dev);
4550        netif_carrier_off(bp->dev);
4551}
4552
4553static void bnxt_tx_enable(struct bnxt *bp)
4554{
4555        int i;
4556        struct bnxt_tx_ring_info *txr;
4557        struct netdev_queue *txq;
4558
4559        for (i = 0; i < bp->tx_nr_rings; i++) {
4560                txr = &bp->tx_ring[i];
4561                txq = netdev_get_tx_queue(bp->dev, i);
4562                txr->dev_state = 0;
4563        }
4564        netif_tx_wake_all_queues(bp->dev);
4565        if (bp->link_info.link_up)
4566                netif_carrier_on(bp->dev);
4567}
4568
4569static void bnxt_report_link(struct bnxt *bp)
4570{
4571        if (bp->link_info.link_up) {
4572                const char *duplex;
4573                const char *flow_ctrl;
4574                u16 speed;
4575
4576                netif_carrier_on(bp->dev);
4577                if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
4578                        duplex = "full";
4579                else
4580                        duplex = "half";
4581                if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
4582                        flow_ctrl = "ON - receive & transmit";
4583                else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
4584                        flow_ctrl = "ON - transmit";
4585                else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
4586                        flow_ctrl = "ON - receive";
4587                else
4588                        flow_ctrl = "none";
4589                speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
4590                netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
4591                            speed, duplex, flow_ctrl);
4592        } else {
4593                netif_carrier_off(bp->dev);
4594                netdev_err(bp->dev, "NIC Link is Down\n");
4595        }
4596}
4597
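/* Query the PHY with HWRM_PORT_PHY_QCFG and cache the result in
 * bp->link_info.  If chng_link_state is set, also update link_up and
 * report any link change.
 */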
4598static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
4599{
4600        int rc = 0;
4601        struct bnxt_link_info *link_info = &bp->link_info;
4602        struct hwrm_port_phy_qcfg_input req = {0};
4603        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4604        u8 link_up = link_info->link_up;
4605
4606        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
4607
4608        mutex_lock(&bp->hwrm_cmd_lock);
4609        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4610        if (rc) {
4611                mutex_unlock(&bp->hwrm_cmd_lock);
4612                return rc;
4613        }
4614
4615        memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
4616        link_info->phy_link_status = resp->link;
4617        link_info->duplex = resp->duplex;
4618        link_info->pause = resp->pause;
4619        link_info->auto_mode = resp->auto_mode;
4620        link_info->auto_pause_setting = resp->auto_pause;
4621        link_info->lp_pause = resp->link_partner_adv_pause;
4622        link_info->force_pause_setting = resp->force_pause;
4623        link_info->duplex_setting = resp->duplex;
4624        if (link_info->phy_link_status == BNXT_LINK_LINK)
4625                link_info->link_speed = le16_to_cpu(resp->link_speed);
4626        else
4627                link_info->link_speed = 0;
4628        link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
4629        link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
4630        link_info->support_speeds = le16_to_cpu(resp->support_speeds);
4631        link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
4632        link_info->lp_auto_link_speeds =
4633                le16_to_cpu(resp->link_partner_adv_speeds);
4634        link_info->preemphasis = le32_to_cpu(resp->preemphasis);
4635        link_info->phy_ver[0] = resp->phy_maj;
4636        link_info->phy_ver[1] = resp->phy_min;
4637        link_info->phy_ver[2] = resp->phy_bld;
4638        link_info->media_type = resp->media_type;
4639        link_info->transceiver = resp->transceiver_type;
4640        link_info->phy_addr = resp->phy_addr;
4641
4642        /* TODO: need to add more logic to report VF link */
4643        if (chng_link_state) {
4644                if (link_info->phy_link_status == BNXT_LINK_LINK)
4645                        link_info->link_up = 1;
4646                else
4647                        link_info->link_up = 0;
4648                if (link_up != link_info->link_up)
4649                        bnxt_report_link(bp);
4650        } else {
4651                /* always link down if not required to update link state */
4652                link_info->link_up = 0;
4653        }
4654        mutex_unlock(&bp->hwrm_cmd_lock);
4655        return 0;
4656}
4657
4658static void
4659bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4660{
4661        if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
4662                if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4663                        req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
4664                if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4665                        req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
4666                req->enables |=
4667                        cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4668        } else {
4669                if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4670                        req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
4671                if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4672                        req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
4673                req->enables |=
4674                        cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
4675        }
4676}
4677
4678static void bnxt_hwrm_set_link_common(struct bnxt *bp,
4679                                      struct hwrm_port_phy_cfg_input *req)
4680{
4681        u8 autoneg = bp->link_info.autoneg;
4682        u16 fw_link_speed = bp->link_info.req_link_speed;
4683        u32 advertising = bp->link_info.advertising;
4684
4685        if (autoneg & BNXT_AUTONEG_SPEED) {
4686                req->auto_mode |=
4687                        PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
4688
4689                req->enables |= cpu_to_le32(
4690                        PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
4691                req->auto_link_speed_mask = cpu_to_le16(advertising);
4692
4693                req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
4694                req->flags |=
4695                        cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
4696        } else {
4697                req->force_link_speed = cpu_to_le16(fw_link_speed);
4698                req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
4699        }
4700
4701        /* currently don't support half duplex */
4702        req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
4703        req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
4704        /* tell chimp that the setting takes effect immediately */
4705        req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4706}
4707
4708int bnxt_hwrm_set_pause(struct bnxt *bp)
4709{
4710        struct hwrm_port_phy_cfg_input req = {0};
4711        int rc;
4712
4713        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4714        bnxt_hwrm_set_pause_common(bp, &req);
4715
4716        if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
4717            bp->link_info.force_link_chng)
4718                bnxt_hwrm_set_link_common(bp, &req);
4719
4720        mutex_lock(&bp->hwrm_cmd_lock);
4721        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4722        if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
4723                /* since changing the pause setting doesn't trigger any link
4724                 * change event, the driver needs to update the current pause
4725                 * result upon successful return of the phy_cfg command
4726                 */
4727                bp->link_info.pause =
4728                bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
4729                bp->link_info.auto_pause_setting = 0;
4730                if (!bp->link_info.force_link_chng)
4731                        bnxt_report_link(bp);
4732        }
4733        bp->link_info.force_link_chng = false;
4734        mutex_unlock(&bp->hwrm_cmd_lock);
4735        return rc;
4736}
4737
4738int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
4739{
4740        struct hwrm_port_phy_cfg_input req = {0};
4741
4742        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4743        if (set_pause)
4744                bnxt_hwrm_set_pause_common(bp, &req);
4745
4746        bnxt_hwrm_set_link_common(bp, &req);
4747        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4748}
4749
4750static int bnxt_update_phy_setting(struct bnxt *bp)
4751{
4752        int rc;
4753        bool update_link = false;
4754        bool update_pause = false;
4755        struct bnxt_link_info *link_info = &bp->link_info;
4756
4757        rc = bnxt_update_link(bp, true);
4758        if (rc) {
4759                netdev_err(bp->dev, "failed to update link (rc: %x)\n",
4760                           rc);
4761                return rc;
4762        }
4763        if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4764            link_info->auto_pause_setting != link_info->req_flow_ctrl)
4765                update_pause = true;
4766        if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4767            link_info->force_pause_setting != link_info->req_flow_ctrl)
4768                update_pause = true;
4769        if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4770                if (BNXT_AUTO_MODE(link_info->auto_mode))
4771                        update_link = true;
4772                if (link_info->req_link_speed != link_info->force_link_speed)
4773                        update_link = true;
4774                if (link_info->req_duplex != link_info->duplex_setting)
4775                        update_link = true;
4776        } else {
4777                if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4778                        update_link = true;
4779                if (link_info->advertising != link_info->auto_link_speeds)
4780                        update_link = true;
4781        }
4782
4783        if (update_link)
4784                rc = bnxt_hwrm_set_link_setting(bp, update_pause);
4785        else if (update_pause)
4786                rc = bnxt_hwrm_set_pause(bp);
4787        if (rc) {
4788                netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
4789                           rc);
4790                return rc;
4791        }
4792
4793        return rc;
4794}
4795
4796/* Common routine to pre-map certain register blocks to different GRC windows.
4797 * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
4798 * in the PF and 3 windows in the VF can be customized to map different
4799 * register blocks.
4800 */
4801static void bnxt_preset_reg_win(struct bnxt *bp)
4802{
4803        if (BNXT_PF(bp)) {
4804                /* CAG registers map to GRC window #4 */
4805                writel(BNXT_CAG_REG_BASE,
4806                       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
4807        }
4808}
4809
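/* Bring the NIC up: set up the interrupt mode, allocate rings and other
 * memory, request IRQs, program the chip, apply the PHY settings and
 * finally enable interrupts and the TX queues.
 */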
4810static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4811{
4812        int rc = 0;
4813
4814        bnxt_preset_reg_win(bp);
4815        netif_carrier_off(bp->dev);
4816        if (irq_re_init) {
4817                rc = bnxt_setup_int_mode(bp);
4818                if (rc) {
4819                        netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
4820                                   rc);
4821                        return rc;
4822                }
4823        }
4824        if ((bp->flags & BNXT_FLAG_RFS) &&
4825            !(bp->flags & BNXT_FLAG_USING_MSIX)) {
4826                /* disable RFS if falling back to INTA */
4827                bp->dev->hw_features &= ~NETIF_F_NTUPLE;
4828                bp->flags &= ~BNXT_FLAG_RFS;
4829        }
4830
4831        rc = bnxt_alloc_mem(bp, irq_re_init);
4832        if (rc) {
4833                netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
4834                goto open_err_free_mem;
4835        }
4836
4837        if (irq_re_init) {
4838                bnxt_init_napi(bp);
4839                rc = bnxt_request_irq(bp);
4840                if (rc) {
4841                        netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
4842                        goto open_err;
4843                }
4844        }
4845
4846        bnxt_enable_napi(bp);
4847
4848        rc = bnxt_init_nic(bp, irq_re_init);
4849        if (rc) {
4850                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
4851                goto open_err;
4852        }
4853
4854        if (link_re_init) {
4855                rc = bnxt_update_phy_setting(bp);
4856                if (rc)
4857                        netdev_warn(bp->dev, "failed to update phy settings\n");
4858        }
4859
4860        if (irq_re_init) {
4861#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
4862                vxlan_get_rx_port(bp->dev);
4863#endif
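                /* 0x17c1 is 6081, the IANA-assigned GENEVE UDP port */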
4864                if (!bnxt_hwrm_tunnel_dst_port_alloc(
4865                                bp, htons(0x17c1),
4866                                TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
4867                        bp->nge_port_cnt = 1;
4868        }
4869
4870        set_bit(BNXT_STATE_OPEN, &bp->state);
4871        bnxt_enable_int(bp);
4872        /* Enable TX queues */
4873        bnxt_tx_enable(bp);
4874        mod_timer(&bp->timer, jiffies + bp->current_interval);
4875        bnxt_update_link(bp, true);
4876
4877        return 0;
4878
4879open_err:
4880        bnxt_disable_napi(bp);
4881        bnxt_del_napi(bp);
4882
4883open_err_free_mem:
4884        bnxt_free_skbs(bp);
4885        bnxt_free_irq(bp);
4886        bnxt_free_mem(bp, true);
4887        return rc;
4888}
4889
4890/* rtnl_lock held */
4891int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4892{
4893        int rc = 0;
4894
4895        rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
4896        if (rc) {
4897                netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
4898                dev_close(bp->dev);
4899        }
4900        return rc;
4901}
4902
4903static int bnxt_open(struct net_device *dev)
4904{
4905        struct bnxt *bp = netdev_priv(dev);
4906        int rc = 0;
4907
4908        rc = bnxt_hwrm_func_reset(bp);
4909        if (rc) {
4910                netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
4911                           rc);
4912                rc = -1;
4913                return rc;
4914        }
4915        return __bnxt_open_nic(bp, true, true);
4916}
4917
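/* Stop the completion rings from generating interrupts and wait for any
 * handlers still running on other CPUs to finish.
 */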
4918static void bnxt_disable_int_sync(struct bnxt *bp)
4919{
4920        int i;
4921
4922        atomic_inc(&bp->intr_sem);
4923        if (!netif_running(bp->dev))
4924                return;
4925
4926        bnxt_disable_int(bp);
4927        for (i = 0; i < bp->cp_nr_rings; i++)
4928                synchronize_irq(bp->irq_tbl[i].vector);
4929}
4930
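/* Tear down the NIC: quiesce TX, wait for the slow-path task, flush the
 * rings, then free SKBs, IRQs, NAPI contexts and ring memory as requested.
 */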
4931int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4932{
4933        int rc = 0;
4934
4935#ifdef CONFIG_BNXT_SRIOV
4936        if (bp->sriov_cfg) {
4937                rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
4938                                                      !bp->sriov_cfg,
4939                                                      BNXT_SRIOV_CFG_WAIT_TMO);
4940                if (!rc)
4941                        netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
4942        }
4943#endif
4944        /* Change device state to avoid TX queue wake-ups */
4945        bnxt_tx_disable(bp);
4946
4947        clear_bit(BNXT_STATE_OPEN, &bp->state);
4948        smp_mb__after_atomic();
4949        while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
4950                msleep(20);
4951
4952        /* Flush rings before disabling interrupts */
4953        bnxt_shutdown_nic(bp, irq_re_init);
4954
4955        /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
4956
4957        bnxt_disable_napi(bp);
4958        bnxt_disable_int_sync(bp);
4959        del_timer_sync(&bp->timer);
4960        bnxt_free_skbs(bp);
4961
4962        if (irq_re_init) {
4963                bnxt_free_irq(bp);
4964                bnxt_del_napi(bp);
4965        }
4966        bnxt_free_mem(bp, irq_re_init);
4967        return rc;
4968}
4969
4970static int bnxt_close(struct net_device *dev)
4971{
4972        struct bnxt *bp = netdev_priv(dev);
4973
4974        bnxt_close_nic(bp, true, true);
4975        return 0;
4976}
4977
4978/* rtnl_lock held */
4979static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4980{
4981        switch (cmd) {
4982        case SIOCGMIIPHY:
4983                /* fallthru */
4984        case SIOCGMIIREG: {
4985                if (!netif_running(dev))
4986                        return -EAGAIN;
4987
4988                return 0;
4989        }
4990
4991        case SIOCSMIIREG:
4992                if (!netif_running(dev))
4993                        return -EAGAIN;
4994
4995                return 0;
4996
4997        default:
4998                /* do nothing */
4999                break;
5000        }
5001        return -EOPNOTSUPP;
5002}
5003
5004static struct rtnl_link_stats64 *
5005bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5006{
5007        u32 i;
5008        struct bnxt *bp = netdev_priv(dev);
5009
5010        memset(stats, 0, sizeof(struct rtnl_link_stats64));
5011
5012        if (!bp->bnapi)
5013                return stats;
5014
5015        /* TODO check if we need to synchronize with bnxt_close path */
5016        for (i = 0; i < bp->cp_nr_rings; i++) {
5017                struct bnxt_napi *bnapi = bp->bnapi[i];
5018                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5019                struct ctx_hw_stats *hw_stats = cpr->hw_stats;
5020
5021                stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
5022                stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
5023                stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
5024
5025                stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
5026                stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
5027                stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
5028
5029                stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
5030                stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
5031                stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
5032
5033                stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
5034                stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
5035                stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
5036
5037                stats->rx_missed_errors +=
5038                        le64_to_cpu(hw_stats->rx_discard_pkts);
5039
5040                stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
5041
5042                stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
5043        }
5044
5045        if (bp->flags & BNXT_FLAG_PORT_STATS) {
5046                struct rx_port_stats *rx = bp->hw_rx_port_stats;
5047                struct tx_port_stats *tx = bp->hw_tx_port_stats;
5048
5049                stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
5050                stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
5051                stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
5052                                          le64_to_cpu(rx->rx_ovrsz_frames) +
5053                                          le64_to_cpu(rx->rx_runt_frames);
5054                stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
5055                                   le64_to_cpu(rx->rx_jbr_frames);
5056                stats->collisions = le64_to_cpu(tx->tx_total_collisions);
5057                stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
5058                stats->tx_errors = le64_to_cpu(tx->tx_err);
5059        }
5060
5061        return stats;
5062}
5063
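/* Copy the netdev multicast list into the default VNIC, falling back to
 * ALL_MCAST if it exceeds BNXT_MAX_MC_ADDRS.  Returns true if the cached
 * list changed.
 */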
5064static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
5065{
5066        struct net_device *dev = bp->dev;
5067        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5068        struct netdev_hw_addr *ha;
5069        u8 *haddr;
5070        int mc_count = 0;
5071        bool update = false;
5072        int off = 0;
5073
5074        netdev_for_each_mc_addr(ha, dev) {
5075                if (mc_count >= BNXT_MAX_MC_ADDRS) {
5076                        *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5077                        vnic->mc_list_count = 0;
5078                        return false;
5079                }
5080                haddr = ha->addr;
5081                if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
5082                        memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
5083                        update = true;
5084                }
5085                off += ETH_ALEN;
5086                mc_count++;
5087        }
5088        if (mc_count)
5089                *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
5090
5091        if (mc_count != vnic->mc_list_count) {
5092                vnic->mc_list_count = mc_count;
5093                update = true;
5094        }
5095        return update;
5096}
5097
5098static bool bnxt_uc_list_updated(struct bnxt *bp)
5099{
5100        struct net_device *dev = bp->dev;
5101        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5102        struct netdev_hw_addr *ha;
5103        int off = 0;
5104
5105        if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
5106                return true;
5107
5108        netdev_for_each_uc_addr(ha, dev) {
5109                if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
5110                        return true;
5111
5112                off += ETH_ALEN;
5113        }
5114        return false;
5115}
5116
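/* ndo_set_rx_mode handler.  Called with the netdev address lock held, so
 * only the new RX mask is computed here; programming the NIC is deferred
 * to the slow-path task.
 */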
5117static void bnxt_set_rx_mode(struct net_device *dev)
5118{
5119        struct bnxt *bp = netdev_priv(dev);
5120        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5121        u32 mask = vnic->rx_mask;
5122        bool mc_update = false;
5123        bool uc_update;
5124
5125        if (!netif_running(dev))
5126                return;
5127
5128        mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
5129                  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
5130                  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
5131
5132        /* Only allow PF to be in promiscuous mode */
5133        if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
5134                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5135
5136        uc_update = bnxt_uc_list_updated(bp);
5137
5138        if (dev->flags & IFF_ALLMULTI) {
5139                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5140                vnic->mc_list_count = 0;
5141        } else {
5142                mc_update = bnxt_mc_list_updated(bp, &mask);
5143        }
5144
5145        if (mask != vnic->rx_mask || uc_update || mc_update) {
5146                vnic->rx_mask = mask;
5147
5148                set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
5149                schedule_work(&bp->sp_task);
5150        }
5151}
5152
5153static int bnxt_cfg_rx_mode(struct bnxt *bp)
5154{
5155        struct net_device *dev = bp->dev;
5156        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5157        struct netdev_hw_addr *ha;
5158        int i, off = 0, rc;
5159        bool uc_update;
5160
5161        netif_addr_lock_bh(dev);
5162        uc_update = bnxt_uc_list_updated(bp);
5163        netif_addr_unlock_bh(dev);
5164
5165        if (!uc_update)
5166                goto skip_uc;
5167
5168        mutex_lock(&bp->hwrm_cmd_lock);
5169        for (i = 1; i < vnic->uc_filter_count; i++) {
5170                struct hwrm_cfa_l2_filter_free_input req = {0};
5171
5172                bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
5173                                       -1);
5174
5175                req.l2_filter_id = vnic->fw_l2_filter_id[i];
5176
5177                rc = _hwrm_send_message(bp, &req, sizeof(req),
5178                                        HWRM_CMD_TIMEOUT);
5179        }
5180        mutex_unlock(&bp->hwrm_cmd_lock);
5181
5182        vnic->uc_filter_count = 1;
5183
5184        netif_addr_lock_bh(dev);
5185        if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
5186                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5187        } else {
5188                netdev_for_each_uc_addr(ha, dev) {
5189                        memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
5190                        off += ETH_ALEN;
5191                        vnic->uc_filter_count++;
5192                }
5193        }
5194        netif_addr_unlock_bh(dev);
5195
5196        for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
5197                rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
5198                if (rc) {
5199                        netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
5200                                   rc);
5201                        vnic->uc_filter_count = i;
5202                        return rc;
5203                }
5204        }
5205
5206skip_uc:
5207        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
5208        if (rc)
5209                netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
5210                           rc);
5211
5212        return rc;
5213}
5214
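/* aRFS needs MSI-X support and one VNIC/RSS context per RX ring on top of
 * the default VNIC; it is not supported on VFs.
 */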
5215static bool bnxt_rfs_capable(struct bnxt *bp)
5216{
5217#ifdef CONFIG_RFS_ACCEL
5218        struct bnxt_pf_info *pf = &bp->pf;
5219        int vnics;
5220
5221        if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
5222                return false;
5223
5224        vnics = 1 + bp->rx_nr_rings;
5225        if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics)
5226                return false;
5227
5228        return true;
5229#else
5230        return false;
5231#endif
5232}
5233
5234static netdev_features_t bnxt_fix_features(struct net_device *dev,
5235                                           netdev_features_t features)
5236{
5237        struct bnxt *bp = netdev_priv(dev);
5238
5239        if (!bnxt_rfs_capable(bp))
5240                features &= ~NETIF_F_NTUPLE;
5241        return features;
5242}
5243
5244static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
5245{
5246        struct bnxt *bp = netdev_priv(dev);
5247        u32 flags = bp->flags;
5248        u32 changes;
5249        int rc = 0;
5250        bool re_init = false;
5251        bool update_tpa = false;
5252
5253        flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
5254        if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
5255                flags |= BNXT_FLAG_GRO;
5256        if (features & NETIF_F_LRO)
5257                flags |= BNXT_FLAG_LRO;
5258
5259        if (features & NETIF_F_HW_VLAN_CTAG_RX)
5260                flags |= BNXT_FLAG_STRIP_VLAN;
5261
5262        if (features & NETIF_F_NTUPLE)
5263                flags |= BNXT_FLAG_RFS;
5264
5265        changes = flags ^ bp->flags;
5266        if (changes & BNXT_FLAG_TPA) {
5267                update_tpa = true;
5268                if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
5269                    (flags & BNXT_FLAG_TPA) == 0)
5270                        re_init = true;
5271        }
5272
5273        if (changes & ~BNXT_FLAG_TPA)
5274                re_init = true;
5275
5276        if (flags != bp->flags) {
5277                u32 old_flags = bp->flags;
5278
5279                bp->flags = flags;
5280
5281                if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5282                        if (update_tpa)
5283                                bnxt_set_ring_params(bp);
5284                        return rc;
5285                }
5286
5287                if (re_init) {
5288                        bnxt_close_nic(bp, false, false);
5289                        if (update_tpa)
5290                                bnxt_set_ring_params(bp);
5291
5292                        return bnxt_open_nic(bp, false, false);
5293                }
5294                if (update_tpa) {
5295                        rc = bnxt_set_tpa(bp,
5296                                          (flags & BNXT_FLAG_TPA) ?
5297                                          true : false);
5298                        if (rc)
5299                                bp->flags = old_flags;
5300                }
5301        }
5302        return rc;
5303}
5304
5305static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
5306{
5307        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
5308        int i = bnapi->index;
5309
5310        if (!txr)
5311                return;
5312
5313        netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
5314                    i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
5315                    txr->tx_cons);
5316}
5317
5318static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
5319{
5320        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
5321        int i = bnapi->index;
5322
5323        if (!rxr)
5324                return;
5325
5326        netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
5327                    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
5328                    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
5329                    rxr->rx_sw_agg_prod);
5330}
5331
5332static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
5333{
5334        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5335        int i = bnapi->index;
5336
5337        netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
5338                    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
5339}
5340
5341static void bnxt_dbg_dump_states(struct bnxt *bp)
5342{
5343        int i;
5344        struct bnxt_napi *bnapi;
5345
5346        for (i = 0; i < bp->cp_nr_rings; i++) {
5347                bnapi = bp->bnapi[i];
5348                if (netif_msg_drv(bp)) {
5349                        bnxt_dump_tx_sw_state(bnapi);
5350                        bnxt_dump_rx_sw_state(bnapi);
5351                        bnxt_dump_cp_sw_state(bnapi);
5352                }
5353        }
5354}
5355
5356static void bnxt_reset_task(struct bnxt *bp)
5357{
5358        bnxt_dbg_dump_states(bp);
5359        if (netif_running(bp->dev)) {
5360                bnxt_close_nic(bp, false, false);
5361                bnxt_open_nic(bp, false, false);
5362        }
5363}
5364
5365static void bnxt_tx_timeout(struct net_device *dev)
5366{
5367        struct bnxt *bp = netdev_priv(dev);
5368
5369        netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
5370        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
5371        schedule_work(&bp->sp_task);
5372}
5373
5374#ifdef CONFIG_NET_POLL_CONTROLLER
5375static void bnxt_poll_controller(struct net_device *dev)
5376{
5377        struct bnxt *bp = netdev_priv(dev);
5378        int i;
5379
5380        for (i = 0; i < bp->cp_nr_rings; i++) {
5381                struct bnxt_irq *irq = &bp->irq_tbl[i];
5382
5383                disable_irq(irq->vector);
5384                irq->handler(irq->vector, bp->bnapi[i]);
5385                enable_irq(irq->vector);
5386        }
5387}
5388#endif
5389
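/* Periodic timer: while the link is up and port statistics are supported,
 * schedule stats collection from the slow-path task, then re-arm.
 */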
5390static void bnxt_timer(unsigned long data)
5391{
5392        struct bnxt *bp = (struct bnxt *)data;
5393        struct net_device *dev = bp->dev;
5394
5395        if (!netif_running(dev))
5396                return;
5397
5398        if (atomic_read(&bp->intr_sem) != 0)
5399                goto bnxt_restart_timer;
5400
5401        if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
5402                set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
5403                schedule_work(&bp->sp_task);
5404        }
5405bnxt_restart_timer:
5406        mod_timer(&bp->timer, jiffies + bp->current_interval);
5407}
5408
5409static void bnxt_cfg_ntp_filters(struct bnxt *);
5410
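/* Slow-path workqueue handler.  Processes the deferred events flagged in
 * bp->sp_event (RX mode, ntuple filters, link changes, tunnel ports,
 * resets, port stats); BNXT_STATE_IN_SP_TASK lets bnxt_close_nic() wait
 * for it to finish.
 */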
5411static void bnxt_sp_task(struct work_struct *work)
5412{
5413        struct bnxt *bp = container_of(work, struct bnxt, sp_task);
5414        int rc;
5415
5416        set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5417        smp_mb__after_atomic();
5418        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5419                clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5420                return;
5421        }
5422
5423        if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
5424                bnxt_cfg_rx_mode(bp);
5425
5426        if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
5427                bnxt_cfg_ntp_filters(bp);
5428        if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
5429                rc = bnxt_update_link(bp, true);
5430                if (rc)
5431                        netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
5432                                   rc);
5433        }
5434        if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
5435                bnxt_hwrm_exec_fwd_req(bp);
5436        if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
5437                bnxt_hwrm_tunnel_dst_port_alloc(
5438                        bp, bp->vxlan_port,
5439                        TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5440        }
5441        if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
5442                bnxt_hwrm_tunnel_dst_port_free(
5443                        bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5444        }
5445        if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
5446                /* bnxt_reset_task() calls bnxt_close_nic() which waits
5447                 * for BNXT_STATE_IN_SP_TASK to clear.
5448                 */
5449                clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5450                rtnl_lock();
5451                bnxt_reset_task(bp);
5452                set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5453                rtnl_unlock();
5454        }
5455
5456        if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
5457                bnxt_hwrm_port_qstats(bp);
5458
5459        smp_mb__before_atomic();
5460        clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5461}
5462
5463static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
5464{
5465        int rc;
5466        struct bnxt *bp = netdev_priv(dev);
5467
5468        SET_NETDEV_DEV(dev, &pdev->dev);
5469
5470        /* enable device (incl. PCI PM wakeup), and bus-mastering */
5471        rc = pci_enable_device(pdev);
5472        if (rc) {
5473                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
5474                goto init_err;
5475        }
5476
5477        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5478                dev_err(&pdev->dev,
5479                        "Cannot find PCI device base address, aborting\n");
5480                rc = -ENODEV;
5481                goto init_err_disable;
5482        }
5483
5484        rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5485        if (rc) {
5486                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
5487                goto init_err_disable;
5488        }
5489
5490        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
5491            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
5492                dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                rc = -EIO;
5493                goto init_err_disable;
5494        }
5495
5496        pci_set_master(pdev);
5497
5498        bp->dev = dev;
5499        bp->pdev = pdev;
5500
5501        bp->bar0 = pci_ioremap_bar(pdev, 0);
5502        if (!bp->bar0) {
5503                dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5504                rc = -ENOMEM;
5505                goto init_err_release;
5506        }
5507
5508        bp->bar1 = pci_ioremap_bar(pdev, 2);
5509        if (!bp->bar1) {
5510                dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
5511                rc = -ENOMEM;
5512                goto init_err_release;
5513        }
5514
5515        bp->bar2 = pci_ioremap_bar(pdev, 4);
5516        if (!bp->bar2) {
5517                dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
5518                rc = -ENOMEM;
5519                goto init_err_release;
5520        }
5521
5522        pci_enable_pcie_error_reporting(pdev);
5523
5524        INIT_WORK(&bp->sp_task, bnxt_sp_task);
5525
5526        spin_lock_init(&bp->ntp_fltr_lock);
5527
5528        bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
5529        bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
5530
5531        /* tick values in microseconds */
5532        bp->rx_coal_ticks = 12;
5533        bp->rx_coal_bufs = 30;
5534        bp->rx_coal_ticks_irq = 1;
5535        bp->rx_coal_bufs_irq = 2;
5536
5537        bp->tx_coal_ticks = 25;
5538        bp->tx_coal_bufs = 30;
5539        bp->tx_coal_ticks_irq = 2;
5540        bp->tx_coal_bufs_irq = 2;
5541
5542        init_timer(&bp->timer);
5543        bp->timer.data = (unsigned long)bp;
5544        bp->timer.function = bnxt_timer;
5545        bp->current_interval = BNXT_TIMER_INTERVAL;
5546
5547        clear_bit(BNXT_STATE_OPEN, &bp->state);
5548
5549        return 0;
5550
5551init_err_release:
5552        if (bp->bar2) {
5553                pci_iounmap(pdev, bp->bar2);
5554                bp->bar2 = NULL;
5555        }
5556
5557        if (bp->bar1) {
5558                pci_iounmap(pdev, bp->bar1);
5559                bp->bar1 = NULL;
5560        }
5561
5562        if (bp->bar0) {
5563                pci_iounmap(pdev, bp->bar0);
5564                bp->bar0 = NULL;
5565        }
5566
5567        pci_release_regions(pdev);
5568
5569init_err_disable:
5570        pci_disable_device(pdev);
5571
5572init_err:
5573        return rc;
5574}
5575
5576/* rtnl_lock held */
5577static int bnxt_change_mac_addr(struct net_device *dev, void *p)
5578{
5579        struct sockaddr *addr = p;
5580        struct bnxt *bp = netdev_priv(dev);
5581        int rc = 0;
5582
5583        if (!is_valid_ether_addr(addr->sa_data))
5584                return -EADDRNOTAVAIL;
5585
5586#ifdef CONFIG_BNXT_SRIOV
5587        if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
5588                return -EADDRNOTAVAIL;
5589#endif
5590
5591        if (ether_addr_equal(addr->sa_data, dev->dev_addr))
5592                return 0;
5593
5594        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5595        if (netif_running(dev)) {
5596                bnxt_close_nic(bp, false, false);
5597                rc = bnxt_open_nic(bp, false, false);
5598        }
5599
5600        return rc;
5601}
5602
5603/* rtnl_lock held */
5604static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
5605{
5606        struct bnxt *bp = netdev_priv(dev);
5607
5608        if (new_mtu < 60 || new_mtu > 9000)
5609                return -EINVAL;
5610
5611        if (netif_running(dev))
5612                bnxt_close_nic(bp, false, false);
5613
5614        dev->mtu = new_mtu;
5615        bnxt_set_ring_params(bp);
5616
5617        if (netif_running(dev))
5618                return bnxt_open_nic(bp, false, false);
5619
5620        return 0;
5621}
5622
5623static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
5624                         struct tc_to_netdev *ntc)
5625{
5626        struct bnxt *bp = netdev_priv(dev);
5627        u8 tc;
5628
5629        if (ntc->type != TC_SETUP_MQPRIO)
5630                return -EINVAL;
5631
5632        tc = ntc->tc;
5633
5634        if (tc > bp->max_tc) {
5635                netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
5636                           tc, bp->max_tc);
5637                return -EINVAL;
5638        }
5639
5640        if (netdev_get_num_tc(dev) == tc)
5641                return 0;
5642
5643        if (tc) {
5644                int max_rx_rings, max_tx_rings, rc;
5645                bool sh = false;
5646
5647                if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5648                        sh = true;
5649
5650                rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
5651                if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
5652                        return -ENOMEM;
5653        }
5654
5655        /* Need to close the device and re-allocate hw resources */
5656        if (netif_running(bp->dev))
5657                bnxt_close_nic(bp, true, false);
5658
5659        if (tc) {
5660                bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
5661                netdev_set_num_tc(dev, tc);
5662        } else {
5663                bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5664                netdev_reset_tc(dev);
5665        }
5666        bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
5667        bp->num_stat_ctxs = bp->cp_nr_rings;
5668
5669        if (netif_running(bp->dev))
5670                return bnxt_open_nic(bp, true, false);
5671
5672        return 0;
5673}
5674
5675#ifdef CONFIG_RFS_ACCEL
5676static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
5677                            struct bnxt_ntuple_filter *f2)
5678{
5679        struct flow_keys *keys1 = &f1->fkeys;
5680        struct flow_keys *keys2 = &f2->fkeys;
5681
5682        if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
5683            keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
5684            keys1->ports.ports == keys2->ports.ports &&
5685            keys1->basic.ip_proto == keys2->basic.ip_proto &&
5686            keys1->basic.n_proto == keys2->basic.n_proto &&
5687            ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
5688                return true;
5689
5690        return false;
5691}
5692
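/* ndo_rx_flow_steer handler.  Dissect the flow, skip flows that already
 * have a filter, reserve a software filter ID and defer the HWRM ntuple
 * filter programming to the slow-path task.
 */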
5693static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
5694                              u16 rxq_index, u32 flow_id)
5695{
5696        struct bnxt *bp = netdev_priv(dev);
5697        struct bnxt_ntuple_filter *fltr, *new_fltr;
5698        struct flow_keys *fkeys;
5699        struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
5700        int rc = 0, idx, bit_id;
5701        struct hlist_head *head;
5702
5703        if (skb->encapsulation)
5704                return -EPROTONOSUPPORT;
5705
5706        new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
5707        if (!new_fltr)
5708                return -ENOMEM;
5709
5710        fkeys = &new_fltr->fkeys;
5711        if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
5712                rc = -EPROTONOSUPPORT;
5713                goto err_free;
5714        }
5715
5716        if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
5717            ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
5718             (fkeys->basic.ip_proto != IPPROTO_UDP))) {
5719                rc = -EPROTONOSUPPORT;
5720                goto err_free;
5721        }
5722
5723        memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
5724
5725        idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
5726        head = &bp->ntp_fltr_hash_tbl[idx];
5727        rcu_read_lock();
5728        hlist_for_each_entry_rcu(fltr, head, hash) {
5729                if (bnxt_fltr_match(fltr, new_fltr)) {
5730                        rcu_read_unlock();
5731                        rc = 0;
5732                        goto err_free;
5733                }
5734        }
5735        rcu_read_unlock();
5736
5737        spin_lock_bh(&bp->ntp_fltr_lock);
5738        bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5739                                         BNXT_NTP_FLTR_MAX_FLTR, 0);
5740        if (bit_id < 0) {
5741                spin_unlock_bh(&bp->ntp_fltr_lock);
5742                rc = -ENOMEM;
5743                goto err_free;
5744        }
5745
5746        new_fltr->sw_id = (u16)bit_id;
5747        new_fltr->flow_id = flow_id;
5748        new_fltr->rxq = rxq_index;
5749        hlist_add_head_rcu(&new_fltr->hash, head);
5750        bp->ntp_fltr_count++;
5751        spin_unlock_bh(&bp->ntp_fltr_lock);
5752
5753        set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
5754        schedule_work(&bp->sp_task);
5755
5756        return new_fltr->sw_id;
5757
5758err_free:
5759        kfree(new_fltr);
5760        return rc;
5761}
5762
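/* Walk the ntuple filter hash table: program filters that have not been
 * pushed to the hardware yet and free the ones that RPS reports as expired.
 */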
5763static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5764{
5765        int i;
5766
5767        for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5768                struct hlist_head *head;
5769                struct hlist_node *tmp;
5770                struct bnxt_ntuple_filter *fltr;
5771                int rc;
5772
5773                head = &bp->ntp_fltr_hash_tbl[i];
5774                hlist_for_each_entry_safe(fltr, tmp, head, hash) {
5775                        bool del = false;
5776
5777                        if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
5778                                if (rps_may_expire_flow(bp->dev, fltr->rxq,
5779                                                        fltr->flow_id,
5780                                                        fltr->sw_id)) {
5781                                        bnxt_hwrm_cfa_ntuple_filter_free(bp,
5782                                                                         fltr);
5783                                        del = true;
5784                                }
5785                        } else {
5786                                rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
5787                                                                       fltr);
5788                                if (rc)
5789                                        del = true;
5790                                else
5791                                        set_bit(BNXT_FLTR_VALID, &fltr->state);
5792                        }
5793
5794                        if (del) {
5795                                spin_lock_bh(&bp->ntp_fltr_lock);
5796                                hlist_del_rcu(&fltr->hash);
5797                                bp->ntp_fltr_count--;
5798                                spin_unlock_bh(&bp->ntp_fltr_lock);
5799                                synchronize_rcu();
5800                                clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5801                                kfree(fltr);
5802                        }
5803                }
5804        }
5805        if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
5806                netdev_info(bp->dev, "Received PF driver unload event!\n");
5807}
5808
5809#else
5810
5811static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5812{
5813}
5814
5815#endif /* CONFIG_RFS_ACCEL */
5816
5817static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5818                                __be16 port)
5819{
5820        struct bnxt *bp = netdev_priv(dev);
5821
5822        if (!netif_running(dev))
5823                return;
5824
5825        if (sa_family != AF_INET6 && sa_family != AF_INET)
5826                return;
5827
5828        if (bp->vxlan_port_cnt && bp->vxlan_port != port)
5829                return;
5830
5831        bp->vxlan_port_cnt++;
5832        if (bp->vxlan_port_cnt == 1) {
5833                bp->vxlan_port = port;
5834                set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
5835                schedule_work(&bp->sp_task);
5836        }
5837}
5838
5839static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5840                                __be16 port)
5841{
5842        struct bnxt *bp = netdev_priv(dev);
5843
5844        if (!netif_running(dev))
5845                return;
5846
5847        if (sa_family != AF_INET6 && sa_family != AF_INET)
5848                return;
5849
5850        if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
5851                bp->vxlan_port_cnt--;
5852
5853                if (bp->vxlan_port_cnt == 0) {
5854                        set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
5855                        schedule_work(&bp->sp_task);
5856                }
5857        }
5858}
5859
5860static const struct net_device_ops bnxt_netdev_ops = {
5861        .ndo_open               = bnxt_open,
5862        .ndo_start_xmit         = bnxt_start_xmit,
5863        .ndo_stop               = bnxt_close,
5864        .ndo_get_stats64        = bnxt_get_stats64,
5865        .ndo_set_rx_mode        = bnxt_set_rx_mode,
5866        .ndo_do_ioctl           = bnxt_ioctl,
5867        .ndo_validate_addr      = eth_validate_addr,
5868        .ndo_set_mac_address    = bnxt_change_mac_addr,
5869        .ndo_change_mtu         = bnxt_change_mtu,
5870        .ndo_fix_features       = bnxt_fix_features,
5871        .ndo_set_features       = bnxt_set_features,
5872        .ndo_tx_timeout         = bnxt_tx_timeout,
5873#ifdef CONFIG_BNXT_SRIOV
5874        .ndo_get_vf_config      = bnxt_get_vf_config,
5875        .ndo_set_vf_mac         = bnxt_set_vf_mac,
5876        .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
5877        .ndo_set_vf_rate        = bnxt_set_vf_bw,
5878        .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
5879        .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
5880#endif
5881#ifdef CONFIG_NET_POLL_CONTROLLER
5882        .ndo_poll_controller    = bnxt_poll_controller,
5883#endif
5884        .ndo_setup_tc           = bnxt_setup_tc,
5885#ifdef CONFIG_RFS_ACCEL
5886        .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
5887#endif
5888        .ndo_add_vxlan_port     = bnxt_add_vxlan_port,
5889        .ndo_del_vxlan_port     = bnxt_del_vxlan_port,
5890#ifdef CONFIG_NET_RX_BUSY_POLL
5891        .ndo_busy_poll          = bnxt_busy_poll,
5892#endif
5893};
5894
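/* PCI remove callback: disable SR-IOV, unregister the netdev, cancel the
 * slow-path work, unregister from the firmware, free HWRM resources and
 * unmap the BARs before releasing the PCI device.
 */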
5895static void bnxt_remove_one(struct pci_dev *pdev)
5896{
5897        struct net_device *dev = pci_get_drvdata(pdev);
5898        struct bnxt *bp = netdev_priv(dev);
5899
5900        if (BNXT_PF(bp))
5901                bnxt_sriov_disable(bp);
5902
5903        pci_disable_pcie_error_reporting(pdev);
5904        unregister_netdev(dev);
5905        cancel_work_sync(&bp->sp_task);
5906        bp->sp_event = 0;
5907
5908        bnxt_hwrm_func_drv_unrgtr(bp);
5909        bnxt_free_hwrm_resources(bp);
5910        pci_iounmap(pdev, bp->bar2);
5911        pci_iounmap(pdev, bp->bar1);
5912        pci_iounmap(pdev, bp->bar0);
5913        free_netdev(dev);
5914
5915        pci_release_regions(pdev);
5916        pci_disable_device(pdev);
5917}
5918
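/* Query the current link state from firmware and seed the requested link
 * settings (autoneg, advertised speeds, duplex, flow control) from the
 * NVM configuration.
 */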
5919static int bnxt_probe_phy(struct bnxt *bp)
5920{
5921        int rc = 0;
5922        struct bnxt_link_info *link_info = &bp->link_info;
5923
5924        rc = bnxt_update_link(bp, false);
5925        if (rc) {
5926                netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
5927                           rc);
5928                return rc;
5929        }
5930
5931        /* Initialize the ethtool settings copy with NVM settings */
5932        if (BNXT_AUTO_MODE(link_info->auto_mode)) {
5933                link_info->autoneg = BNXT_AUTONEG_SPEED |
5934                                     BNXT_AUTONEG_FLOW_CTRL;
5935                link_info->advertising = link_info->auto_link_speeds;
5936                link_info->req_flow_ctrl = link_info->auto_pause_setting;
5937        } else {
5938                link_info->req_link_speed = link_info->force_link_speed;
5939                link_info->req_duplex = link_info->duplex_setting;
5940                link_info->req_flow_ctrl = link_info->force_pause_setting;
5941        }
5942        return rc;
5943}
5944
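/* Return the number of MSI-X vectors supported by the device, read from
 * the MSI-X capability's table size field, or 1 if MSI-X is unavailable.
 */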
5945static int bnxt_get_max_irq(struct pci_dev *pdev)
5946{
5947        u16 ctrl;
5948
5949        if (!pdev->msix_cap)
5950                return 1;
5951
5952        pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
5953        return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
5954}
5955
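/* Report the maximum RX/TX/completion rings available to this function,
 * based on the PF or VF resource limits returned by firmware.  RX is
 * halved when aggregation rings are enabled and capped by the number of
 * hardware ring groups.
 */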
5956static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
5957                                int *max_cp)
5958{
5959        int max_ring_grps = 0;
5960
5961#ifdef CONFIG_BNXT_SRIOV
5962        if (!BNXT_PF(bp)) {
5963                *max_tx = bp->vf.max_tx_rings;
5964                *max_rx = bp->vf.max_rx_rings;
5965                *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
5966                *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
5967                max_ring_grps = bp->vf.max_hw_ring_grps;
5968        } else
5969#endif
5970        {
5971                *max_tx = bp->pf.max_tx_rings;
5972                *max_rx = bp->pf.max_rx_rings;
5973                *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
5974                *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
5975                max_ring_grps = bp->pf.max_hw_ring_grps;
5976        }
5977
5978        if (bp->flags & BNXT_FLAG_AGG_RINGS)
5979                *max_rx >>= 1;
5980        *max_rx = min_t(int, *max_rx, max_ring_grps);
5981}
5982
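/* Return usable RX and TX ring counts, trimmed to fit the available
 * completion rings (shared or dedicated); fails with -ENOMEM if any
 * resource is exhausted.
 */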
5983int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
5984{
5985        int rx, tx, cp;
5986
5987        _bnxt_get_max_rings(bp, &rx, &tx, &cp);
5988        if (!rx || !tx || !cp)
5989                return -ENOMEM;
5990
5991        *max_rx = rx;
5992        *max_tx = tx;
5993        return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
5994}
5995
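/* Pick default ring counts: shared completion rings, with RX/TX sized
 * from netif_get_num_default_rss_queues() and clamped to the hardware
 * maximums.
 */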
5996static int bnxt_set_dflt_rings(struct bnxt *bp)
5997{
5998        int dflt_rings, max_rx_rings, max_tx_rings, rc;
5999        bool sh = true;
6000
6001        if (sh)
6002                bp->flags |= BNXT_FLAG_SHARED_RINGS;
6003        dflt_rings = netif_get_num_default_rss_queues();
6004        rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6005        if (rc)
6006                return rc;
6007        bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
6008        bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
6009        bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6010        bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6011                               bp->tx_nr_rings + bp->rx_nr_rings;
6012        bp->num_stat_ctxs = bp->cp_nr_rings;
6013        return rc;
6014}
6015
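/* PCI probe callback: allocate the netdev, map the BARs, establish HWRM
 * communication with the firmware, query capabilities, set up default
 * features and ring counts, probe the PHY and register the net device.
 */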
6016static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6017{
6018        static int version_printed;
6019        struct net_device *dev;
6020        struct bnxt *bp;
6021        int rc, max_irqs;
6022
6023        if (version_printed++ == 0)
6024                pr_info("%s", version);
6025
6026        max_irqs = bnxt_get_max_irq(pdev);
6027        dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
6028        if (!dev)
6029                return -ENOMEM;
6030
6031        bp = netdev_priv(dev);
6032
6033        if (bnxt_vf_pciid(ent->driver_data))
6034                bp->flags |= BNXT_FLAG_VF;
6035
6036        if (pdev->msix_cap)
6037                bp->flags |= BNXT_FLAG_MSIX_CAP;
6038
6039        rc = bnxt_init_board(pdev, dev);
6040        if (rc < 0)
6041                goto init_err_free;
6042
6043        dev->netdev_ops = &bnxt_netdev_ops;
6044        dev->watchdog_timeo = BNXT_TX_TIMEOUT;
6045        dev->ethtool_ops = &bnxt_ethtool_ops;
6046
6047        pci_set_drvdata(pdev, dev);
6048
6049        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6050                           NETIF_F_TSO | NETIF_F_TSO6 |
6051                           NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
6052                           NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
6053                           NETIF_F_RXHASH |
6054                           NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
6055
6056        dev->hw_enc_features =
6057                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6058                        NETIF_F_TSO | NETIF_F_TSO6 |
6059                        NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
6060                        NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
6061        dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
6062        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
6063                            NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
6064        dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
6065        dev->priv_flags |= IFF_UNICAST_FLT;
6066
6067#ifdef CONFIG_BNXT_SRIOV
6068        init_waitqueue_head(&bp->sriov_cfg_wait);
6069#endif
6070        rc = bnxt_alloc_hwrm_resources(bp);
6071        if (rc)
6072                goto init_err;
6073
6074        mutex_init(&bp->hwrm_cmd_lock);
6075        bnxt_hwrm_ver_get(bp);
6076
6077        rc = bnxt_hwrm_func_drv_rgtr(bp);
6078        if (rc)
6079                goto init_err;
6080
6081        /* Get the MAX capabilities for this function */
6082        rc = bnxt_hwrm_func_qcaps(bp);
6083        if (rc) {
6084                netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
6085                           rc);
6086                rc = -1;
6087                goto init_err;
6088        }
6089
6090        rc = bnxt_hwrm_queue_qportcfg(bp);
6091        if (rc) {
6092                netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
6093                           rc);
6094                rc = -1;
6095                goto init_err;
6096        }
6097
6098        bnxt_set_tpa_flags(bp);
6099        bnxt_set_ring_params(bp);
6100        if (BNXT_PF(bp))
6101                bp->pf.max_irqs = max_irqs;
6102#if defined(CONFIG_BNXT_SRIOV)
6103        else
6104                bp->vf.max_irqs = max_irqs;
6105#endif
6106        bnxt_set_dflt_rings(bp);
6107
6108        if (BNXT_PF(bp)) {
6109                dev->hw_features |= NETIF_F_NTUPLE;
6110                if (bnxt_rfs_capable(bp)) {
6111                        bp->flags |= BNXT_FLAG_RFS;
6112                        dev->features |= NETIF_F_NTUPLE;
6113                }
6114        }
6115
6116        if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
6117                bp->flags |= BNXT_FLAG_STRIP_VLAN;
6118
6119        rc = bnxt_probe_phy(bp);
6120        if (rc)
6121                goto init_err;
6122
6123        rc = register_netdev(dev);
6124        if (rc)
6125                goto init_err;
6126
6127        netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
6128                    board_info[ent->driver_data].name,
6129                    (long)pci_resource_start(pdev, 0), dev->dev_addr);
6130
6131        return 0;
6132
6133init_err:
6134        pci_iounmap(pdev, bp->bar0);
6135        pci_release_regions(pdev);
6136        pci_disable_device(pdev);
6137
6138init_err_free:
6139        free_netdev(dev);
6140        return rc;
6141}
6142
6143/**
6144 * bnxt_io_error_detected - called when PCI error is detected
6145 * @pdev: Pointer to PCI device
6146 * @state: The current pci connection state
6147 *
6148 * This function is called after a PCI bus error affecting
6149 * this device has been detected.
6150 */
6151static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
6152                                               pci_channel_state_t state)
6153{
6154        struct net_device *netdev = pci_get_drvdata(pdev);
6155
6156        netdev_info(netdev, "PCI I/O error detected\n");
6157
6158        rtnl_lock();
6159        netif_device_detach(netdev);
6160
6161        if (state == pci_channel_io_perm_failure) {
6162                rtnl_unlock();
6163                return PCI_ERS_RESULT_DISCONNECT;
6164        }
6165
6166        if (netif_running(netdev))
6167                bnxt_close(netdev);
6168
6169        pci_disable_device(pdev);
6170        rtnl_unlock();
6171
6172        /* Request a slot reset. */
6173        return PCI_ERS_RESULT_NEED_RESET;
6174}
6175
6176/**
6177 * bnxt_io_slot_reset - called after the pci bus has been reset.
6178 * @pdev: Pointer to PCI device
6179 *
6180 * Restart the card from scratch, as if from a cold-boot.
6181 * At this point, the card has experienced a hard reset,
6182 * followed by fixups by BIOS, and has its config space
6183 * set up identically to what it was at cold boot.
6184 */
6185static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
6186{
6187        struct net_device *netdev = pci_get_drvdata(pdev);
6188        struct bnxt *bp = netdev_priv(netdev);
6189        int err = 0;
6190        pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
6191
6192        netdev_info(bp->dev, "PCI Slot Reset\n");
6193
6194        rtnl_lock();
6195
6196        if (pci_enable_device(pdev)) {
6197                dev_err(&pdev->dev,
6198                        "Cannot re-enable PCI device after reset.\n");
6199        } else {
6200                pci_set_master(pdev);
6201
6202                if (netif_running(netdev))
6203                        err = bnxt_open(netdev);
6204
6205                if (!err)
6206                        result = PCI_ERS_RESULT_RECOVERED;
6207        }
6208
6209        if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
6210                dev_close(netdev);
6211
6212        rtnl_unlock();
6213
6214        err = pci_cleanup_aer_uncorrect_error_status(pdev);
6215        if (err) {
6216                dev_err(&pdev->dev,
6217                        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
6218                         err); /* non-fatal, continue */
6219        }
6220
6221        return result;
6222}
6223
6224/**
6225 * bnxt_io_resume - called when traffic can start flowing again.
6226 * @pdev: Pointer to PCI device
6227 *
6228 * This callback is called when the error recovery driver tells
6229 * us that it's OK to resume normal operation.
6230 */
6231static void bnxt_io_resume(struct pci_dev *pdev)
6232{
6233        struct net_device *netdev = pci_get_drvdata(pdev);
6234
6235        rtnl_lock();
6236
6237        netif_device_attach(netdev);
6238
6239        rtnl_unlock();
6240}
6241
6242static const struct pci_error_handlers bnxt_err_handler = {
6243        .error_detected = bnxt_io_error_detected,
6244        .slot_reset     = bnxt_io_slot_reset,
6245        .resume         = bnxt_io_resume
6246};
6247
6248static struct pci_driver bnxt_pci_driver = {
6249        .name           = DRV_MODULE_NAME,
6250        .id_table       = bnxt_pci_tbl,
6251        .probe          = bnxt_init_one,
6252        .remove         = bnxt_remove_one,
6253        .err_handler    = &bnxt_err_handler,
6254#if defined(CONFIG_BNXT_SRIOV)
6255        .sriov_configure = bnxt_sriov_configure,
6256#endif
6257};
6258
6259module_pci_driver(bnxt_pci_driver);
6260