linux/drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
        return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

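/* Allocate a queue ring in DMA-consistent memory and zero both the
 * be_queue_info bookkeeping and the ring contents.
 */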
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

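/* Enable or disable host interrupts by flipping the HOSTINTR bit of the
 * membar interrupt-control register; does nothing if the bit already
 * matches the requested state or an EEH error has been flagged.
 */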
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

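/* Doorbell helpers below: each packs a ring id and a posted/popped count
 * (plus arm/clear-interrupt flags for EQs and CQs) into a single 32-bit
 * write to the adapter's doorbell BAR.
 */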
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

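/* ndo_set_mac_address handler: on the PF, replace the interface's pmac
 * entry with the new address; for VFs the MAC is programmed by the PF,
 * so only the netdev copy is updated.
 */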
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

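/* Fold the most recent FW stats snapshot (rxf/port/erx counters) into
 * netdev->stats.
 */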
void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

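/* Fill the header WRB for a transmit: checksum/LSO/VLAN offload flags,
 * the number of WRBs that follow and the total payload length.
 */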
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        pci_unmap_single(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
        }
}

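/* DMA-map the skb head and frags into TX WRBs following a reserved
 * header WRB. On a mapping failure, unwind the mappings done so far and
 * return 0 so the caller drops the skb.
 */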
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = pci_map_single(pdev, skb->data, len,
                                         PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = pci_map_page(pdev, frag->page,
                                       frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(pdev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

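/* ndo_start_xmit handler: build WRBs for the skb, stop the queue early
 * if the ring may not fit another max-fragmented skb, then ring the TX
 * doorbell.
 */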
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                u32 pktsize, u16 numfrags, u8 pkt_type)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += numfrags;
        stats->rx_bytes += pktsize;
        stats->rx_pkts++;
        if (pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
        u8 l4_cksm, ipv6, ipcksm;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

        /* Ignore ipcksm for ipv6 pkts */
        return l4_cksm && (ipcksm || ipv6);
}

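/* Look up the page_info backing RX frag 'frag_idx'; the compound page
 * is unmapped once its last fragment is consumed, and the posted-buffer
 * count is decremented.
 */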
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        /* Skip out-of-buffer compl (lancer) or flush compl (BE) */
        if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
                rxo->last_frag_index = rxq_idx;

                for (i = 0; i < num_rcvd; i++) {
                        page_info = get_rx_page_info(adapter, rxo, rxq_idx);
                        put_page(page_info->page);
                        memset(page_info, 0, sizeof(*page_info));
                        index_inc(&rxq_idx, rxq->len);
                }
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * Ignore it if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * Ignore it if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

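/* Pop the next valid RX completion (converted to CPU endianness), or
 * return NULL if the ring is empty, i.e. the valid bit is not yet set.
 */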
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

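/* Reclaim a completed transmit: unmap every WRB up to last_index,
 * return the freed WRB slots to the ring and free the skb.
 */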
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
                                        skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

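/* Drain all pending entries of an event queue, re-arm it, and schedule
 * NAPI if any events were found; this also acks spurious interrupts
 * that arrive with no events pending.
 */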
static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

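/* Drain pending TX completions for up to ~200ms; any posted WRBs whose
 * completions never arrive are then reclaimed by hand.
 */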
static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

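/* Create the TX event queue, TX completion queue and the ethernet TX
 * ring, unwinding in reverse order on any failure.
 */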
1480static int be_tx_queues_create(struct be_adapter *adapter)
1481{
1482        struct be_queue_info *eq, *q, *cq;
1483
1484        adapter->tx_eq.max_eqd = 0;
1485        adapter->tx_eq.min_eqd = 0;
1486        adapter->tx_eq.cur_eqd = 96;
1487        adapter->tx_eq.enable_aic = false;
1488        /* Alloc Tx Event queue */
1489        eq = &adapter->tx_eq.q;
1490        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1491                return -1;
1492
1493        /* Ask BE to create Tx Event queue */
1494        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1495                goto tx_eq_free;
1496
1497        adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1498
1499
1500        /* Alloc TX eth compl queue */
1501        cq = &adapter->tx_obj.cq;
1502        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1503                        sizeof(struct be_eth_tx_compl)))
1504                goto tx_eq_destroy;
1505
1506        /* Ask BE to create Tx eth compl queue */
1507        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1508                goto tx_cq_free;
1509
1510        /* Alloc TX eth queue */
1511        q = &adapter->tx_obj.q;
1512        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1513                goto tx_cq_destroy;
1514
1515        /* Ask BE to create Tx eth queue */
1516        if (be_cmd_txq_create(adapter, q, cq))
1517                goto tx_q_free;
1518        return 0;
1519
1520tx_q_free:
1521        be_queue_free(adapter, q);
1522tx_cq_destroy:
1523        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1524tx_cq_free:
1525        be_queue_free(adapter, cq);
1526tx_eq_destroy:
1527        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1528tx_eq_free:
1529        be_queue_free(adapter, eq);
1530        return -1;
1531}
1532
1533static void be_rx_queues_destroy(struct be_adapter *adapter)
1534{
1535        struct be_queue_info *q;
1536        struct be_rx_obj *rxo;
1537        int i;
1538
1539        for_all_rx_queues(adapter, rxo, i) {
1540                q = &rxo->q;
1541                if (q->created) {
1542                        be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1543                        /* After the rxq is invalidated, wait a grace
1544                         * period of 1ms for all DMA to complete and the
1545                         * flush completion to arrive
1546                         */
1547                        mdelay(1);
1548                        be_rx_q_clean(adapter, rxo);
1549                }
1550                be_queue_free(adapter, q);
1551
1552                q = &rxo->cq;
1553                if (q->created)
1554                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1555                be_queue_free(adapter, q);
1556
1557                /* Clear any residual events */
1558                q = &rxo->rx_eq.q;
1559                if (q->created) {
1560                        be_eq_clean(adapter, &rxo->rx_eq);
1561                        be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1562                }
1563                be_queue_free(adapter, q);
1564        }
1565}
1566
1567static int be_rx_queues_create(struct be_adapter *adapter)
1568{
1569        struct be_queue_info *eq, *q, *cq;
1570        struct be_rx_obj *rxo;
1571        int rc, i;
1572
1573        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1574        for_all_rx_queues(adapter, rxo, i) {
1575                rxo->adapter = adapter;
1576                /* Init last_frag_index so that the frag index in the first
1577                 * completion will never match */
1578                rxo->last_frag_index = 0xffff;
1579                rxo->rx_eq.max_eqd = BE_MAX_EQD;
1580                rxo->rx_eq.enable_aic = true;
1581
1582                /* EQ */
1583                eq = &rxo->rx_eq.q;
1584                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1585                                        sizeof(struct be_eq_entry));
1586                if (rc)
1587                        goto err;
1588
1589                rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1590                if (rc)
1591                        goto err;
1592
1593                rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1594
1595                /* CQ */
1596                cq = &rxo->cq;
1597                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1598                                sizeof(struct be_eth_rx_compl));
1599                if (rc)
1600                        goto err;
1601
1602                rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1603                if (rc)
1604                        goto err;
1605                /* Rx Q */
1606                q = &rxo->q;
1607                rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1608                                sizeof(struct be_eth_rx_d));
1609                if (rc)
1610                        goto err;
1611
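                    /* Queue 0 is the default non-RSS queue; the remaining
                     * queues are created RSS-capable */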
1612                rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1613                        BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1614                        (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1615                if (rc)
1616                        goto err;
1617        }
1618
1619        if (be_multi_rxq(adapter)) {
1620                u8 rsstable[MAX_RSS_QS];
1621
1622                for_all_rss_queues(adapter, rxo, i)
1623                        rsstable[i] = rxo->rss_id;
1624
1625                rc = be_cmd_rss_config(adapter, rsstable,
1626                        adapter->num_rx_qs - 1);
1627                if (rc)
1628                        goto err;
1629        }
1630
1631        return 0;
1632err:
1633        be_rx_queues_destroy(adapter);
1634        return -1;
1635}
1636
1637static bool event_peek(struct be_eq_obj *eq_obj)
1638{
1639        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1640        return eqe->evt != 0;
1644}
1645
1646static irqreturn_t be_intx(int irq, void *dev)
1647{
1648        struct be_adapter *adapter = dev;
1649        struct be_rx_obj *rxo;
1650        int isr, i, tx = 0, rx = 0;
1651
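            /* Lancer has no CSR-mapped ISR to read; peek each EQ for
             * pending entries to decide whether this (possibly shared)
             * interrupt is ours */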
1652        if (lancer_chip(adapter)) {
1653                if (event_peek(&adapter->tx_eq))
1654                        tx = event_handle(adapter, &adapter->tx_eq);
1655                for_all_rx_queues(adapter, rxo, i) {
1656                        if (event_peek(&rxo->rx_eq))
1657                                rx |= event_handle(adapter, &rxo->rx_eq);
1658                }
1659
1660                if (!(tx || rx))
1661                        return IRQ_NONE;
1662
1663        } else {
1664                isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1665                        (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1666                if (!isr)
1667                        return IRQ_NONE;
1668
1669                if (isr & (1 << adapter->tx_eq.msix_vec_idx))
1670                        event_handle(adapter, &adapter->tx_eq);
1671
1672                for_all_rx_queues(adapter, rxo, i) {
1673                        if (isr & (1 << rxo->rx_eq.msix_vec_idx))
1674                                event_handle(adapter, &rxo->rx_eq);
1675                }
1676        }
1677
1678        return IRQ_HANDLED;
1679}
1680
1681static irqreturn_t be_msix_rx(int irq, void *dev)
1682{
1683        struct be_rx_obj *rxo = dev;
1684        struct be_adapter *adapter = rxo->adapter;
1685
1686        event_handle(adapter, &rxo->rx_eq);
1687
1688        return IRQ_HANDLED;
1689}
1690
1691static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1692{
1693        struct be_adapter *adapter = dev;
1694
1695        event_handle(adapter, &adapter->tx_eq);
1696
1697        return IRQ_HANDLED;
1698}
1699
1700static inline bool do_gro(struct be_rx_obj *rxo,
1701                        struct be_eth_rx_compl *rxcp, u8 err)
1702{
1703        int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1704
1705        if (err)
1706                rxo->stats.rxcp_err++;
1707
1708        return tcp_frame && !err;
1709}
1710
1711static int be_poll_rx(struct napi_struct *napi, int budget)
1712{
1713        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1714        struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1715        struct be_adapter *adapter = rxo->adapter;
1716        struct be_queue_info *rx_cq = &rxo->cq;
1717        struct be_eth_rx_compl *rxcp;
1718        u32 work_done;
1719        u16 frag_index, num_rcvd;
1720        u8 err;
1721
1722        rxo->stats.rx_polls++;
1723        for (work_done = 0; work_done < budget; work_done++) {
1724                rxcp = be_rx_compl_get(rxo);
1725                if (!rxcp)
1726                        break;
1727
1728                err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1729                frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
1730                                                                rxcp);
1731                num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1732                                                                rxcp);
1733
1734                /* Skip out-of-buffer compl (Lancer) or flush compl (BE) */
1735                if (likely(frag_index != rxo->last_frag_index &&
1736                                num_rcvd != 0)) {
1737                        rxo->last_frag_index = frag_index;
1738
1739                        if (do_gro(rxo, rxcp, err))
1740                                be_rx_compl_process_gro(adapter, rxo, rxcp);
1741                        else
1742                                be_rx_compl_process(adapter, rxo, rxcp);
1743                }
1744
1745                be_rx_compl_reset(rxcp);
1746        }
1747
1748        /* Refill the queue */
1749        if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1750                be_post_rx_frags(rxo);
1751
1752        /* All consumed */
1753        if (work_done < budget) {
1754                napi_complete(napi);
1755                be_cq_notify(adapter, rx_cq->id, true, work_done);
1756        } else {
1757                /* More to be consumed; continue with interrupts disabled */
1758                be_cq_notify(adapter, rx_cq->id, false, work_done);
1759        }
1760        return work_done;
1761}
1762
1763/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1764 * For TX/MCC we don't honour the budget; consume everything.
1765 */
1766static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1767{
1768        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1769        struct be_adapter *adapter =
1770                container_of(tx_eq, struct be_adapter, tx_eq);
1771        struct be_queue_info *txq = &adapter->tx_obj.q;
1772        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1773        struct be_eth_tx_compl *txcp;
1774        int tx_compl = 0, mcc_compl, status = 0;
1775        u16 end_idx;
1776
1777        while ((txcp = be_tx_compl_get(tx_cq))) {
1778                end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1779                                wrb_index, txcp);
1780                be_tx_compl_process(adapter, end_idx);
1781                tx_compl++;
1782        }
1783
1784        mcc_compl = be_process_mcc(adapter, &status);
1785
1786        napi_complete(napi);
1787
1788        if (mcc_compl) {
1789                struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1790                be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1791        }
1792
1793        if (tx_compl) {
1794                be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1795
1796                /* As Tx wrbs have been freed up, wake up netdev queue if
1797                 * it was stopped due to lack of tx wrbs.
1798                 */
1799                if (netif_queue_stopped(adapter->netdev) &&
1800                        atomic_read(&txq->used) < txq->len / 2) {
1801                        netif_wake_queue(adapter->netdev);
1802                }
1803
1804                tx_stats(adapter)->be_tx_events++;
1805                tx_stats(adapter)->be_tx_compl += tx_compl;
1806        }
1807
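            /* Always return 1: TX/MCC processing ignores the NAPI budget
             * and napi_complete() has already been called above */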
1808        return 1;
1809}
1810
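    /* Dump any unmasked Unrecoverable Error (UE) bits read from PCI config
     * space; the bit names come from the ue_status_{low,hi}_desc tables */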
1811void be_detect_dump_ue(struct be_adapter *adapter)
1812{
1813        u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1814        u32 i;
1815
1816        pci_read_config_dword(adapter->pdev,
1817                                PCICFG_UE_STATUS_LOW, &ue_status_lo);
1818        pci_read_config_dword(adapter->pdev,
1819                                PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1820        pci_read_config_dword(adapter->pdev,
1821                                PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1822        pci_read_config_dword(adapter->pdev,
1823                                PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1824
1825        ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1826        ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1827
1828        if (ue_status_lo || ue_status_hi) {
1829                adapter->ue_detected = true;
1830                dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1831        }
1832
1833        if (ue_status_lo) {
1834                for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1835                        if (ue_status_lo & 1)
1836                                dev_err(&adapter->pdev->dev,
1837                                "UE: %s bit set\n", ue_status_low_desc[i]);
1838                }
1839        }
1840        if (ue_status_hi) {
1841                for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1842                        if (ue_status_hi & 1)
1843                                dev_err(&adapter->pdev->dev,
1844                                "UE: %s bit set\n", ue_status_hi_desc[i]);
1845                }
1846        }
1848}
1849
1850static void be_worker(struct work_struct *work)
1851{
1852        struct be_adapter *adapter =
1853                container_of(work, struct be_adapter, work.work);
1854        struct be_rx_obj *rxo;
1855        int i;
1856
1857        /* When interrupts are not yet enabled, just reap any pending
1858         * mcc completions */
1859        if (!netif_running(adapter->netdev)) {
1860                int mcc_compl, status = 0;
1861
1862                mcc_compl = be_process_mcc(adapter, &status);
1863
1864                if (mcc_compl) {
1865                        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1866                        be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1867                }
1868                goto reschedule;
1869        }
1870
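            /* Fire a fresh stats request only when the previous one has
             * completed; stats_ioctl_sent is presumably cleared by the
             * stats completion handler */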
1871        if (!adapter->stats_ioctl_sent)
1872                be_cmd_get_stats(adapter, &adapter->stats_cmd);
1873
1874        be_tx_rate_update(adapter);
1875
1876        for_all_rx_queues(adapter, rxo, i) {
1877                be_rx_rate_update(rxo);
1878                be_rx_eqd_update(adapter, rxo);
1879
1880                if (rxo->rx_post_starved) {
1881                        rxo->rx_post_starved = false;
1882                        be_post_rx_frags(rxo);
1883                }
1884        }
1885        if (!adapter->ue_detected && !lancer_chip(adapter))
1886                be_detect_dump_ue(adapter);
1887
1888reschedule:
1889        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1890}
1891
1892static void be_msix_disable(struct be_adapter *adapter)
1893{
1894        if (adapter->msix_enabled) {
1895                pci_disable_msix(adapter->pdev);
1896                adapter->msix_enabled = false;
1897        }
1898}
1899
1900static int be_num_rxqs_get(struct be_adapter *adapter)
1901{
1902        if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1903                !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1904                return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1905        } else {
1906                dev_warn(&adapter->pdev->dev,
1907                        "No support for multiple RX queues\n");
1908                return 1;
1909        }
1910}
1911
1912static void be_msix_enable(struct be_adapter *adapter)
1913{
1914#define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1915        int i, status;
1916
1917        adapter->num_rx_qs = be_num_rxqs_get(adapter);
1918
1919        for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1920                adapter->msix_entries[i].entry = i;
1921
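            /* Legacy pci_enable_msix() semantics: 0 on success, a negative
             * errno on failure, or a positive count of vectors that could
             * have been allocated (none are enabled in that case) */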
1922        status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1923                        adapter->num_rx_qs + 1);
1924        if (status == 0) {
1925                goto done;
1926        } else if (status >= BE_MIN_MSIX_VECTORS) {
1927                if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1928                                status) == 0) {
1929                        adapter->num_rx_qs = status - 1;
1930                        dev_warn(&adapter->pdev->dev,
1931                                "Could alloc only %d MSIx vectors. "
1932                                "Using %d RX Qs\n", status, adapter->num_rx_qs);
1933                        goto done;
1934                }
1935        }
1936        return;
1937done:
1938        adapter->msix_enabled = true;
1939}
1940
1941static void be_sriov_enable(struct be_adapter *adapter)
1942{
1943        be_check_sriov_fn_type(adapter);
1944#ifdef CONFIG_PCI_IOV
1945        if (be_physfn(adapter) && num_vfs) {
1946                int status;
1947
1948                status = pci_enable_sriov(adapter->pdev, num_vfs);
1949                adapter->sriov_enabled = !status;
1950        }
1951#endif
1952}
1953
1954static void be_sriov_disable(struct be_adapter *adapter)
1955{
1956#ifdef CONFIG_PCI_IOV
1957        if (adapter->sriov_enabled) {
1958                pci_disable_sriov(adapter->pdev);
1959                adapter->sriov_enabled = false;
1960        }
1961#endif
1962}
1963
1964static inline int be_msix_vec_get(struct be_adapter *adapter,
1965                                        struct be_eq_obj *eq_obj)
1966{
1967        return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1968}
1969
1970static int be_request_irq(struct be_adapter *adapter,
1971                struct be_eq_obj *eq_obj,
1972                void *handler, char *desc, void *context)
1973{
1974        struct net_device *netdev = adapter->netdev;
1975        int vec;
1976
1977        sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1978        vec = be_msix_vec_get(adapter, eq_obj);
1979        return request_irq(vec, handler, 0, eq_obj->desc, context);
1980}
1981
1982static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1983                        void *context)
1984{
1985        int vec = be_msix_vec_get(adapter, eq_obj);
1986        free_irq(vec, context);
1987}
1988
1989static int be_msix_register(struct be_adapter *adapter)
1990{
1991        struct be_rx_obj *rxo;
1992        int status, i;
1993        char qname[10];
1994
1995        status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1996                                adapter);
1997        if (status)
1998                goto err;
1999
2000        for_all_rx_queues(adapter, rxo, i) {
2001                sprintf(qname, "rxq%d", i);
2002                status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2003                                qname, rxo);
2004                if (status)
2005                        goto err_msix;
2006        }
2007
2008        return 0;
2009
2010err_msix:
2011        be_free_irq(adapter, &adapter->tx_eq, adapter);
2012
2013        for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2014                be_free_irq(adapter, &rxo->rx_eq, rxo);
2015
2016err:
2017        dev_warn(&adapter->pdev->dev,
2018                "MSIX Request IRQ failed - err %d\n", status);
2019        pci_disable_msix(adapter->pdev);
2020        adapter->msix_enabled = false;
2021        return status;
2022}
2023
2024static int be_irq_register(struct be_adapter *adapter)
2025{
2026        struct net_device *netdev = adapter->netdev;
2027        int status;
2028
2029        if (adapter->msix_enabled) {
2030                status = be_msix_register(adapter);
2031                if (status == 0)
2032                        goto done;
2033                /* INTx is not supported for VF */
2034                if (!be_physfn(adapter))
2035                        return status;
2036        }
2037
2038        /* INTx */
2039        netdev->irq = adapter->pdev->irq;
2040        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2041                        adapter);
2042        if (status) {
2043                dev_err(&adapter->pdev->dev,
2044                        "INTx request IRQ failed - err %d\n", status);
2045                return status;
2046        }
2047done:
2048        adapter->isr_registered = true;
2049        return 0;
2050}
2051
2052static void be_irq_unregister(struct be_adapter *adapter)
2053{
2054        struct net_device *netdev = adapter->netdev;
2055        struct be_rx_obj *rxo;
2056        int i;
2057
2058        if (!adapter->isr_registered)
2059                return;
2060
2061        /* INTx */
2062        if (!adapter->msix_enabled) {
2063                free_irq(netdev->irq, adapter);
2064                goto done;
2065        }
2066
2067        /* MSIx */
2068        be_free_irq(adapter, &adapter->tx_eq, adapter);
2069
2070        for_all_rx_queues(adapter, rxo, i)
2071                be_free_irq(adapter, &rxo->rx_eq, rxo);
2072
2073done:
2074        adapter->isr_registered = false;
2075}
2076
2077static int be_close(struct net_device *netdev)
2078{
2079        struct be_adapter *adapter = netdev_priv(netdev);
2080        struct be_rx_obj *rxo;
2081        struct be_eq_obj *tx_eq = &adapter->tx_eq;
2082        int vec, i;
2083
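            /* Teardown order: quiesce MCC and the netdev, mask interrupts,
             * wait out in-flight IRQs, then stop NAPI and drain any
             * pending TX completions */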
2084        be_async_mcc_disable(adapter);
2085
2086        netif_stop_queue(netdev);
2087        netif_carrier_off(netdev);
2088        adapter->link_up = false;
2089
2090        if (!lancer_chip(adapter))
2091                be_intr_set(adapter, false);
2092
2093        if (adapter->msix_enabled) {
2094                vec = be_msix_vec_get(adapter, tx_eq);
2095                synchronize_irq(vec);
2096
2097                for_all_rx_queues(adapter, rxo, i) {
2098                        vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2099                        synchronize_irq(vec);
2100                }
2101        } else {
2102                synchronize_irq(netdev->irq);
2103        }
2104        be_irq_unregister(adapter);
2105
2106        for_all_rx_queues(adapter, rxo, i)
2107                napi_disable(&rxo->rx_eq.napi);
2108
2109        napi_disable(&tx_eq->napi);
2110
2111        /* Wait for all pending tx completions to arrive so that
2112         * all tx skbs are freed.
2113         */
2114        be_tx_compl_clean(adapter);
2115
2116        return 0;
2117}
2118
2119static int be_open(struct net_device *netdev)
2120{
2121        struct be_adapter *adapter = netdev_priv(netdev);
2122        struct be_eq_obj *tx_eq = &adapter->tx_eq;
2123        struct be_rx_obj *rxo;
2124        bool link_up;
2125        int status, i;
2126        u8 mac_speed;
2127        u16 link_speed;
2128
2129        for_all_rx_queues(adapter, rxo, i) {
2130                be_post_rx_frags(rxo);
2131                napi_enable(&rxo->rx_eq.napi);
2132        }
2133        napi_enable(&tx_eq->napi);
2134
2135        be_irq_register(adapter);
2136
2137        if (!lancer_chip(adapter))
2138                be_intr_set(adapter, true);
2139
2140        /* The evt queues are created in unarmed state; arm them */
2141        for_all_rx_queues(adapter, rxo, i) {
2142                be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2143                be_cq_notify(adapter, rxo->cq.id, true, 0);
2144        }
2145        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2146
2147        /* Now that interrupts are on we can process async mcc */
2148        be_async_mcc_enable(adapter);
2149
2150        status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2151                        &link_speed);
2152        if (status)
2153                goto err;
2154        be_link_status_update(adapter, link_up);
2155
2156        if (be_physfn(adapter)) {
2157                status = be_vid_config(adapter, false, 0);
2158                if (status)
2159                        goto err;
2160
2161                status = be_cmd_set_flow_control(adapter,
2162                                adapter->tx_fc, adapter->rx_fc);
2163                if (status)
2164                        goto err;
2165        }
2166
2167        return 0;
2168err:
2169        be_close(adapter->netdev);
2170        return -EIO;
2171}
2172
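    /* Program the magic-packet WoL filter in FW and set PCI PME wake for
     * D3hot/D3cold to match the requested state */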
2173static int be_setup_wol(struct be_adapter *adapter, bool enable)
2174{
2175        struct be_dma_mem cmd;
2176        int status = 0;
2177        u8 mac[ETH_ALEN];
2178
2179        memset(mac, 0, ETH_ALEN);
2180
2181        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2182        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2183        if (cmd.va == NULL)
2184                return -1;
2185        memset(cmd.va, 0, cmd.size);
2186
2187        if (enable) {
2188                status = pci_write_config_dword(adapter->pdev,
2189                        PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2190                if (status) {
2191                        dev_err(&adapter->pdev->dev,
2192                                "Could not enable Wake-on-LAN\n");
2193                        pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2194                                        cmd.dma);
2195                        return status;
2196                }
2197                status = be_cmd_enable_magic_wol(adapter,
2198                                adapter->netdev->dev_addr, &cmd);
2199                pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2200                pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2201        } else {
2202                status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2203                pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2204                pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2205        }
2206
2207        pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2208        return status;
2209}
2210
2211/*
2212 * Generate a seed MAC address from the PF MAC address using jhash.
2213 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2214 * These addresses are programmed in the ASIC by the PF; each VF driver
2215 * queries its own MAC address during probe.
2216 */
2217static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2218{
2219        u32 vf = 0;
2220        int status = 0;
2221        u8 mac[ETH_ALEN];
2222
2223        be_vf_eth_addr_generate(adapter, mac);
2224
2225        for (vf = 0; vf < num_vfs; vf++) {
2226                status = be_cmd_pmac_add(adapter, mac,
2227                                        adapter->vf_cfg[vf].vf_if_handle,
2228                                        &adapter->vf_cfg[vf].vf_pmac_id);
2229                if (status)
2230                        dev_err(&adapter->pdev->dev,
2231                                "Mac address add failed for VF %d\n", vf);
2232                else
2233                        memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2234
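                    /* Only the last octet is bumped: there is no carry into
                     * mac[4], so this assumes num_vfs is small enough not
                     * to wrap the octet */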
2235                mac[5] += 1;
2236        }
2237        return status;
2238}
2239
2240static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2241{
2242        u32 vf;
2243
2244        for (vf = 0; vf < num_vfs; vf++) {
2245                if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2246                        be_cmd_pmac_del(adapter,
2247                                        adapter->vf_cfg[vf].vf_if_handle,
2248                                        adapter->vf_cfg[vf].vf_pmac_id);
2249        }
2250}
2251
2252static int be_setup(struct be_adapter *adapter)
2253{
2254        struct net_device *netdev = adapter->netdev;
2255        u32 cap_flags, en_flags, vf = 0;
2256        int status;
2257        u8 mac[ETH_ALEN];
2258
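            /* cap_flags: everything the interface may later be asked to
             * do; en_flags: what is enabled at create time */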
2259        cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2260
2261        if (be_physfn(adapter)) {
2262                cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2263                                BE_IF_FLAGS_PROMISCUOUS |
2264                                BE_IF_FLAGS_PASS_L3L4_ERRORS;
2265                en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2266
2267                if (be_multi_rxq(adapter)) {
2268                        cap_flags |= BE_IF_FLAGS_RSS;
2269                        en_flags |= BE_IF_FLAGS_RSS;
2270                }
2271        }
2272
2273        status = be_cmd_if_create(adapter, cap_flags, en_flags,
2274                        netdev->dev_addr, false/* pmac_invalid */,
2275                        &adapter->if_handle, &adapter->pmac_id, 0);
2276        if (status != 0)
2277                goto do_none;
2278
2279        if (be_physfn(adapter)) {
2280                while (vf < num_vfs) {
2281                        cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2282                                        | BE_IF_FLAGS_BROADCAST;
2283                        status = be_cmd_if_create(adapter, cap_flags, en_flags,
2284                                        mac, true,
2285                                        &adapter->vf_cfg[vf].vf_if_handle,
2286                                        NULL, vf+1);
2287                        if (status) {
2288                                dev_err(&adapter->pdev->dev,
2289                                "Interface Create failed for VF %d\n", vf);
2290                                goto if_destroy;
2291                        }
2292                        adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2293                        vf++;
2294                }
2295        } else {
2296                status = be_cmd_mac_addr_query(adapter, mac,
2297                        MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2298                if (!status) {
2299                        memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2300                        memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2301                }
2302        }
2303
2304        status = be_tx_queues_create(adapter);
2305        if (status != 0)
2306                goto if_destroy;
2307
2308        status = be_rx_queues_create(adapter);
2309        if (status != 0)
2310                goto tx_qs_destroy;
2311
2312        status = be_mcc_queues_create(adapter);
2313        if (status != 0)
2314                goto rx_qs_destroy;
2315
2316        if (be_physfn(adapter)) {
2317                status = be_vf_eth_addr_config(adapter);
2318                if (status)
2319                        goto mcc_q_destroy;
2320        }
2321
2322        adapter->link_speed = -1;
2323
2324        return 0;
2325
2326mcc_q_destroy:
2327        if (be_physfn(adapter))
2328                be_vf_eth_addr_rem(adapter);
2329        be_mcc_queues_destroy(adapter);
2330rx_qs_destroy:
2331        be_rx_queues_destroy(adapter);
2332tx_qs_destroy:
2333        be_tx_queues_destroy(adapter);
2334if_destroy:
2335        for (vf = 0; vf < num_vfs; vf++)
2336                if (adapter->vf_cfg[vf].vf_if_handle)
2337                        be_cmd_if_destroy(adapter,
2338                                        adapter->vf_cfg[vf].vf_if_handle);
2339        be_cmd_if_destroy(adapter, adapter->if_handle);
2340do_none:
2341        return status;
2342}
2343
2344static int be_clear(struct be_adapter *adapter)
2345{
2346        if (be_physfn(adapter))
2347                be_vf_eth_addr_rem(adapter);
2348
2349        be_mcc_queues_destroy(adapter);
2350        be_rx_queues_destroy(adapter);
2351        be_tx_queues_destroy(adapter);
2352
2353        be_cmd_if_destroy(adapter, adapter->if_handle);
2354
2355        /* tell fw we're done with firing cmds */
2356        be_cmd_fw_clean(adapter);
2357        return 0;
2358}
2359
2361#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
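    /* Flash the redboot section only when the CRC stored in flash differs
     * from the CRC (last 4 bytes) of the new redboot image */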
2362static bool be_flash_redboot(struct be_adapter *adapter,
2363                        const u8 *p, u32 img_start, int image_size,
2364                        int hdr_size)
2365{
2366        u32 crc_offset;
2367        u8 flashed_crc[4];
2368        int status;
2369
2370        crc_offset = hdr_size + img_start + image_size - 4;
2371
2372        p += crc_offset;
2373
2374        status = be_cmd_get_flash_crc(adapter, flashed_crc,
2375                        (image_size - 4));
2376        if (status) {
2377                dev_err(&adapter->pdev->dev,
2378                "could not get crc from flash, not flashing redboot\n");
2379                return false;
2380        }
2381
2382        /* Update redboot only if the CRC does not match */
2383        return memcmp(flashed_crc, p, 4) != 0;
2387}
2388
2389static int be_flash_data(struct be_adapter *adapter,
2390                        const struct firmware *fw,
2391                        struct be_dma_mem *flash_cmd, int num_of_images)
2393{
2394        int status = 0, i, filehdr_size = 0;
2395        u32 total_bytes = 0, flash_op;
2396        int num_bytes;
2397        const u8 *p = fw->data;
2398        struct be_cmd_write_flashrom *req = flash_cmd->va;
2399        const struct flash_comp *pflashcomp;
2400        int num_comp;
2401
2402        static const struct flash_comp gen3_flash_types[9] = {
2403                { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2404                        FLASH_IMAGE_MAX_SIZE_g3},
2405                { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2406                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2407                { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2408                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2409                { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2410                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2411                { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2412                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2413                { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2414                        FLASH_IMAGE_MAX_SIZE_g3},
2415                { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2416                        FLASH_IMAGE_MAX_SIZE_g3},
2417                { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2418                        FLASH_IMAGE_MAX_SIZE_g3},
2419                { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2420                        FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2421        };
2422        static const struct flash_comp gen2_flash_types[8] = {
2423                { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2424                        FLASH_IMAGE_MAX_SIZE_g2},
2425                { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2426                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2427                { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2428                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2429                { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2430                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2431                { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2432                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2433                { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2434                        FLASH_IMAGE_MAX_SIZE_g2},
2435                { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2436                        FLASH_IMAGE_MAX_SIZE_g2},
2437                { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2438                         FLASH_IMAGE_MAX_SIZE_g2}
2439        };
2440
2441        if (adapter->generation == BE_GEN3) {
2442                pflashcomp = gen3_flash_types;
2443                filehdr_size = sizeof(struct flash_file_hdr_g3);
2444                num_comp = ARRAY_SIZE(gen3_flash_types);
2445        } else {
2446                pflashcomp = gen2_flash_types;
2447                filehdr_size = sizeof(struct flash_file_hdr_g2);
2448                num_comp = ARRAY_SIZE(gen2_flash_types);
2449        }
2450        for (i = 0; i < num_comp; i++) {
2451                if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2452                                memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2453                        continue;
2454                if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2455                        (!be_flash_redboot(adapter, fw->data,
2456                         pflashcomp[i].offset, pflashcomp[i].size,
2457                         filehdr_size)))
2458                        continue;
2459                p = fw->data;
2460                p += filehdr_size + pflashcomp[i].offset
2461                        + (num_of_images * sizeof(struct image_hdr));
2462                if (p + pflashcomp[i].size > fw->data + fw->size)
2463                        return -1;
2464                total_bytes = pflashcomp[i].size;
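                    /* Stage the image in chunks of up to 32KB with
                     * FLASHROM_OPER_SAVE; the final chunk is written with
                     * FLASHROM_OPER_FLASH, which presumably commits the
                     * staged data */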
2465                while (total_bytes) {
2466                        if (total_bytes > 32*1024)
2467                                num_bytes = 32*1024;
2468                        else
2469                                num_bytes = total_bytes;
2470                        total_bytes -= num_bytes;
2471
2472                        if (!total_bytes)
2473                                flash_op = FLASHROM_OPER_FLASH;
2474                        else
2475                                flash_op = FLASHROM_OPER_SAVE;
2476                        memcpy(req->params.data_buf, p, num_bytes);
2477                        p += num_bytes;
2478                        status = be_cmd_write_flashrom(adapter, flash_cmd,
2479                                pflashcomp[i].optype, flash_op, num_bytes);
2480                        if (status) {
2481                                dev_err(&adapter->pdev->dev,
2482                                        "cmd to write to flash rom failed.\n");
2483                                return -1;
2484                        }
2485                        yield();
2486                }
2487        }
2488        return 0;
2489}
2490
2491static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2492{
2493        if (fhdr == NULL)
2494                return 0;
2495        if (fhdr->build[0] == '3')
2496                return BE_GEN3;
2497        else if (fhdr->build[0] == '2')
2498                return BE_GEN2;
2499        else
2500                return 0;
2501}
2502
2503int be_load_fw(struct be_adapter *adapter, u8 *func)
2504{
2505        char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2506        const struct firmware *fw;
2507        struct flash_file_hdr_g2 *fhdr;
2508        struct flash_file_hdr_g3 *fhdr3;
2509        struct image_hdr *img_hdr_ptr = NULL;
2510        struct be_dma_mem flash_cmd;
2511        int status, i = 0, num_imgs = 0;
2512        const u8 *p;
2513
2514        if (!netif_running(adapter->netdev)) {
2515                dev_err(&adapter->pdev->dev,
2516                        "Firmware load not allowed (interface is down)\n");
2517                return -EPERM;
2518        }
2519
2520        strcpy(fw_file, func);
2521
2522        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2523        if (status)
2524                goto fw_exit;
2525
2526        p = fw->data;
2527        fhdr = (struct flash_file_hdr_g2 *) p;
2528        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2529
2530        flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2531        flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2532                                        &flash_cmd.dma);
2533        if (!flash_cmd.va) {
2534                status = -ENOMEM;
2535                dev_err(&adapter->pdev->dev,
2536                        "Memory allocation failure while flashing\n");
2537                goto fw_exit;
2538        }
2539
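            /* A GEN3 UFI may carry several images; only those with image
             * id 1 (assumed here to identify the NIC image) are flashed */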
2540        if ((adapter->generation == BE_GEN3) &&
2541                        (get_ufigen_type(fhdr) == BE_GEN3)) {
2542                fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2543                num_imgs = le32_to_cpu(fhdr3->num_imgs);
2544                for (i = 0; i < num_imgs; i++) {
2545                        img_hdr_ptr = (struct image_hdr *) (fw->data +
2546                                        (sizeof(struct flash_file_hdr_g3) +
2547                                         i * sizeof(struct image_hdr)));
2548                        if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2549                                status = be_flash_data(adapter, fw, &flash_cmd,
2550                                                        num_imgs);
2551                }
2552        } else if ((adapter->generation == BE_GEN2) &&
2553                        (get_ufigen_type(fhdr) == BE_GEN2)) {
2554                status = be_flash_data(adapter, fw, &flash_cmd, 0);
2555        } else {
2556                dev_err(&adapter->pdev->dev,
2557                        "UFI and Interface are not compatible for flashing\n");
2558                status = -1;
2559        }
2560
2561        pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2562                                flash_cmd.dma);
2563        if (status) {
2564                dev_err(&adapter->pdev->dev, "Firmware load error\n");
2565                goto fw_exit;
2566        }
2567
2568        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2569
2570fw_exit:
2571        release_firmware(fw);
2572        return status;
2573}
2574
2575static const struct net_device_ops be_netdev_ops = {
2576        .ndo_open               = be_open,
2577        .ndo_stop               = be_close,
2578        .ndo_start_xmit         = be_xmit,
2579        .ndo_set_rx_mode        = be_set_multicast_list,
2580        .ndo_set_mac_address    = be_mac_addr_set,
2581        .ndo_change_mtu         = be_change_mtu,
2582        .ndo_validate_addr      = eth_validate_addr,
2583        .ndo_vlan_rx_register   = be_vlan_register,
2584        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2585        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2586        .ndo_set_vf_mac         = be_set_vf_mac,
2587        .ndo_set_vf_vlan        = be_set_vf_vlan,
2588        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2589        .ndo_get_vf_config      = be_get_vf_config
2590};
2591
2592static void be_netdev_init(struct net_device *netdev)
2593{
2594        struct be_adapter *adapter = netdev_priv(netdev);
2595        struct be_rx_obj *rxo;
2596        int i;
2597
2598        netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2599                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2600                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2601                NETIF_F_GRO | NETIF_F_TSO6;
2602
2603        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2604                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2605
2606        if (lancer_chip(adapter))
2607                netdev->vlan_features |= NETIF_F_TSO6;
2608
2609        netdev->flags |= IFF_MULTICAST;
2610
2611        adapter->rx_csum = true;
2612
2613        /* Default settings for Rx and Tx flow control */
2614        adapter->rx_fc = true;
2615        adapter->tx_fc = true;
2616
2617        netif_set_gso_max_size(netdev, 65535);
2618
2619        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2620
2621        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2622
2623        for_all_rx_queues(adapter, rxo, i)
2624                netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2625                                BE_NAPI_WEIGHT);
2626
2627        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2628                BE_NAPI_WEIGHT);
2629}
2630
2631static void be_unmap_pci_bars(struct be_adapter *adapter)
2632{
2633        if (adapter->csr)
2634                iounmap(adapter->csr);
2635        if (adapter->db)
2636                iounmap(adapter->db);
2637        if (adapter->pcicfg && be_physfn(adapter))
2638                iounmap(adapter->pcicfg);
2639}
2640
2641static int be_map_pci_bars(struct be_adapter *adapter)
2642{
2643        u8 __iomem *addr;
2644        int pcicfg_reg, db_reg;
2645
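            /* BAR layout differs by chip: Lancer exposes only a doorbell
             * BAR (0); on BE2/BE3 the PF also maps CSR (BAR 2), and the
             * pcicfg/doorbell BAR indices depend on generation and PF/VF */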
2646        if (lancer_chip(adapter)) {
2647                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2648                        pci_resource_len(adapter->pdev, 0));
2649                if (addr == NULL)
2650                        return -ENOMEM;
2651                adapter->db = addr;
2652                return 0;
2653        }
2654
2655        if (be_physfn(adapter)) {
2656                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2657                                pci_resource_len(adapter->pdev, 2));
2658                if (addr == NULL)
2659                        return -ENOMEM;
2660                adapter->csr = addr;
2661        }
2662
2663        if (adapter->generation == BE_GEN2) {
2664                pcicfg_reg = 1;
2665                db_reg = 4;
2666        } else {
2667                pcicfg_reg = 0;
2668                if (be_physfn(adapter))
2669                        db_reg = 4;
2670                else
2671                        db_reg = 0;
2672        }
2673        addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2674                                pci_resource_len(adapter->pdev, db_reg));
2675        if (addr == NULL)
2676                goto pci_map_err;
2677        adapter->db = addr;
2678
2679        if (be_physfn(adapter)) {
2680                addr = ioremap_nocache(
2681                                pci_resource_start(adapter->pdev, pcicfg_reg),
2682                                pci_resource_len(adapter->pdev, pcicfg_reg));
2683                if (addr == NULL)
2684                        goto pci_map_err;
2685                adapter->pcicfg = addr;
2686        } else {
2687                adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
            }
2688
2689        return 0;
2690pci_map_err:
2691        be_unmap_pci_bars(adapter);
2692        return -ENOMEM;
2693}
2694
2696static void be_ctrl_cleanup(struct be_adapter *adapter)
2697{
2698        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2699
2700        be_unmap_pci_bars(adapter);
2701
2702        if (mem->va)
2703                pci_free_consistent(adapter->pdev, mem->size,
2704                        mem->va, mem->dma);
2705
2706        mem = &adapter->mc_cmd_mem;
2707        if (mem->va)
2708                pci_free_consistent(adapter->pdev, mem->size,
2709                        mem->va, mem->dma);
2710}
2711
2712static int be_ctrl_init(struct be_adapter *adapter)
2713{
2714        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2715        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2716        struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2717        int status;
2718
2719        status = be_map_pci_bars(adapter);
2720        if (status)
2721                goto done;
2722
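            /* The fw mailbox must be 16-byte aligned: over-allocate by 16
             * bytes and align va/dma by hand */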
2723        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2724        mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2725                                mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2726        if (!mbox_mem_alloc->va) {
2727                status = -ENOMEM;
2728                goto unmap_pci_bars;
2729        }
2730
2731        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2732        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2733        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2734        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2735
2736        mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2737        mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2738                        &mc_cmd_mem->dma);
2739        if (mc_cmd_mem->va == NULL) {
2740                status = -ENOMEM;
2741                goto free_mbox;
2742        }
2743        memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2744
2745        mutex_init(&adapter->mbox_lock);
2746        spin_lock_init(&adapter->mcc_lock);
2747        spin_lock_init(&adapter->mcc_cq_lock);
2748
2749        init_completion(&adapter->flash_compl);
2750        pci_save_state(adapter->pdev);
2751        return 0;
2752
2753free_mbox:
2754        pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2755                mbox_mem_alloc->va, mbox_mem_alloc->dma);
2756
2757unmap_pci_bars:
2758        be_unmap_pci_bars(adapter);
2759
2760done:
2761        return status;
2762}
2763
2764static void be_stats_cleanup(struct be_adapter *adapter)
2765{
2766        struct be_dma_mem *cmd = &adapter->stats_cmd;
2767
2768        if (cmd->va)
2769                pci_free_consistent(adapter->pdev, cmd->size,
2770                        cmd->va, cmd->dma);
2771}
2772
2773static int be_stats_init(struct be_adapter *adapter)
2774{
2775        struct be_dma_mem *cmd = &adapter->stats_cmd;
2776
2777        cmd->size = sizeof(struct be_cmd_req_get_stats);
2778        cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2779        if (cmd->va == NULL)
2780                return -1;
2781        memset(cmd->va, 0, cmd->size);
2782        return 0;
2783}
2784
2785static void __devexit be_remove(struct pci_dev *pdev)
2786{
2787        struct be_adapter *adapter = pci_get_drvdata(pdev);
2788
2789        if (!adapter)
2790                return;
2791
2792        cancel_delayed_work_sync(&adapter->work);
2793
2794        unregister_netdev(adapter->netdev);
2795
2796        be_clear(adapter);
2797
2798        be_stats_cleanup(adapter);
2799
2800        be_ctrl_cleanup(adapter);
2801
2802        be_sriov_disable(adapter);
2803
2804        be_msix_disable(adapter);
2805
2806        pci_set_drvdata(pdev, NULL);
2807        pci_release_regions(pdev);
2808        pci_disable_device(pdev);
2809
2810        free_netdev(adapter->netdev);
2811}
2812
2813static int be_get_config(struct be_adapter *adapter)
2814{
2815        int status;
2816        u8 mac[ETH_ALEN];
2817
2818        status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2819        if (status)
2820                return status;
2821
2822        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2823                        &adapter->function_mode, &adapter->function_caps);
2824        if (status)
2825                return status;
2826
2827        memset(mac, 0, ETH_ALEN);
2828
2829        if (be_physfn(adapter)) {
2830                status = be_cmd_mac_addr_query(adapter, mac,
2831                        MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2832
2833                if (status)
2834                        return status;
2835
2836                if (!is_valid_ether_addr(mac))
2837                        return -EADDRNOTAVAIL;
2838
2839                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2840                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2841        }
2842
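            /* Bit 0x400 of function_mode appears to select multi-channel
             * (FLEX10) operation, where the VLAN table is shared 4 ways */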
2843        if (adapter->function_mode & 0x400)
2844                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2845        else
2846                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2847
2848        return 0;
2849}
2850
2851static int be_dev_family_check(struct be_adapter *adapter)
2852{
2853        struct pci_dev *pdev = adapter->pdev;
2854        u32 sli_intf = 0, if_type;
2855
2856        switch (pdev->device) {
2857        case BE_DEVICE_ID1:
2858        case OC_DEVICE_ID1:
2859                adapter->generation = BE_GEN2;
2860                break;
2861        case BE_DEVICE_ID2:
2862        case OC_DEVICE_ID2:
2863                adapter->generation = BE_GEN3;
2864                break;
2865        case OC_DEVICE_ID3:
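                    /* Lancer: sanity-check the SLI interface register
                     * before trusting this device */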
2866                pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2867                if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2868                                                SLI_INTF_IF_TYPE_SHIFT;
2869
2870                if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2871                        if_type != 0x02) {
2872                        dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2873                        return -EINVAL;
2874                }
2875                if (num_vfs > 0) {
2876                        dev_err(&pdev->dev, "VFs not supported\n");
2877                        return -EINVAL;
2878                }
2879                adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2880                                         SLI_INTF_FAMILY_SHIFT);
2881                adapter->generation = BE_GEN3;
2882                break;
2883        default:
2884                adapter->generation = 0;
2885        }
2886        return 0;
2887}
2888
2889static int __devinit be_probe(struct pci_dev *pdev,
2890                        const struct pci_device_id *pdev_id)
2891{
2892        int status = 0;
2893        struct be_adapter *adapter;
2894        struct net_device *netdev;
2895
2896        status = pci_enable_device(pdev);
2897        if (status)
2898                goto do_none;
2899
2900        status = pci_request_regions(pdev, DRV_NAME);
2901        if (status)
2902                goto disable_dev;
2903        pci_set_master(pdev);
2904
2905        netdev = alloc_etherdev(sizeof(struct be_adapter));
2906        if (netdev == NULL) {
2907                status = -ENOMEM;
2908                goto rel_reg;
2909        }
2910        adapter = netdev_priv(netdev);
2911        adapter->pdev = pdev;
2912        pci_set_drvdata(pdev, adapter);
2913
2914        status = be_dev_family_check(adapter);
2915        if (status)
2916                goto free_netdev;
2917
2918        adapter->netdev = netdev;
2919        SET_NETDEV_DEV(netdev, &pdev->dev);
2920
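            /* Prefer 64-bit DMA addressing; fall back to 32-bit if the
             * platform cannot provide it */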
2921        status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2922        if (!status) {
2923                netdev->features |= NETIF_F_HIGHDMA;
2924        } else {
2925                status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2926                if (status) {
2927                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2928                        goto free_netdev;
2929                }
2930        }
2931
2932        be_sriov_enable(adapter);
2933
2934        status = be_ctrl_init(adapter);
2935        if (status)
2936                goto free_netdev;
2937
2938        /* sync up with fw's ready state */
2939        if (be_physfn(adapter)) {
2940                status = be_cmd_POST(adapter);
2941                if (status)
2942                        goto ctrl_clean;
2943        }
2944
2945        /* tell fw we're ready to fire cmds */
2946        status = be_cmd_fw_init(adapter);
2947        if (status)
2948                goto ctrl_clean;
2949
2950        if (be_physfn(adapter)) {
2951                status = be_cmd_reset_function(adapter);
2952                if (status)
2953                        goto ctrl_clean;
2954        }
2955
2956        status = be_stats_init(adapter);
2957        if (status)
2958                goto ctrl_clean;
2959
2960        status = be_get_config(adapter);
2961        if (status)
2962                goto stats_clean;
2963
2964        be_msix_enable(adapter);
2965
2966        INIT_DELAYED_WORK(&adapter->work, be_worker);
2967
2968        status = be_setup(adapter);
2969        if (status)
2970                goto msix_disable;
2971
2972        be_netdev_init(netdev);
2973        status = register_netdev(netdev);
2974        if (status != 0)
2975                goto unsetup;
2976        netif_carrier_off(netdev);
2977
2978        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2979        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2980        return 0;
2981
2982unsetup:
2983        be_clear(adapter);
2984msix_disable:
2985        be_msix_disable(adapter);
2986stats_clean:
2987        be_stats_cleanup(adapter);
2988ctrl_clean:
2989        be_ctrl_cleanup(adapter);
2990free_netdev:
2991        be_sriov_disable(adapter);
2992        free_netdev(netdev);
2993        pci_set_drvdata(pdev, NULL);
2994rel_reg:
2995        pci_release_regions(pdev);
2996disable_dev:
2997        pci_disable_device(pdev);
2998do_none:
2999        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3000        return status;
3001}
3002
3003static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3004{
3005        struct be_adapter *adapter = pci_get_drvdata(pdev);
3006        struct net_device *netdev =  adapter->netdev;
3007
3008        if (adapter->wol)
3009                be_setup_wol(adapter, true);
3010
3011        netif_device_detach(netdev);
3012        if (netif_running(netdev)) {
3013                rtnl_lock();
3014                be_close(netdev);
3015                rtnl_unlock();
3016        }
3017        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3018        be_clear(adapter);
3019
3020        pci_save_state(pdev);
3021        pci_disable_device(pdev);
3022        pci_set_power_state(pdev, pci_choose_state(pdev, state));
3023        return 0;
3024}
3025
3026static int be_resume(struct pci_dev *pdev)
3027{
3028        int status = 0;
3029        struct be_adapter *adapter = pci_get_drvdata(pdev);
3030        struct net_device *netdev =  adapter->netdev;
3031
3032        netif_device_detach(netdev);
3033
3034        status = pci_enable_device(pdev);
3035        if (status)
3036                return status;
3037
3038        pci_set_power_state(pdev, PCI_D0);
3039        pci_restore_state(pdev);
3040
3041        /* tell fw we're ready to fire cmds */
3042        status = be_cmd_fw_init(adapter);
3043        if (status)
3044                return status;
3045
3046        status = be_setup(adapter);
            if (status)
                    return status;
3047        if (netif_running(netdev)) {
3048                rtnl_lock();
3049                be_open(netdev);
3050                rtnl_unlock();
3051        }
3052        netif_device_attach(netdev);
3053
3054        if (adapter->wol)
3055                be_setup_wol(adapter, false);
3056        return 0;
3057}
3058
3059/*
3060 * An FLR (triggered by the function reset below) stops BE from DMAing any data.
3061 */
3062static void be_shutdown(struct pci_dev *pdev)
3063{
3064        struct be_adapter *adapter = pci_get_drvdata(pdev);
3065        struct net_device *netdev = adapter->netdev;
3066
3067        netif_device_detach(netdev);
            /* stop be_worker; it must not touch the h/w once the function is reset */
            cancel_delayed_work_sync(&adapter->work);
3068
3069        be_cmd_reset_function(adapter);
3070
3071        if (adapter->wol)
3072                be_setup_wol(adapter, true);
3073
3074        pci_disable_device(pdev);
3075}
3076
3077static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3078                                pci_channel_state_t state)
3079{
3080        struct be_adapter *adapter = pci_get_drvdata(pdev);
3081        struct net_device *netdev = adapter->netdev;
3082
3083        dev_err(&adapter->pdev->dev, "EEH error detected\n");
3084
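            /* Flag the EEH error state so the rest of the driver can avoid h/w access */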
3085        adapter->eeh_err = true;
3086
3087        netif_device_detach(netdev);
3088
3089        if (netif_running(netdev)) {
3090                rtnl_lock();
3091                be_close(netdev);
3092                rtnl_unlock();
3093        }
3094        be_clear(adapter);
3095
3096        if (state == pci_channel_io_perm_failure)
3097                return PCI_ERS_RESULT_DISCONNECT;
3098
3099        pci_disable_device(pdev);
3100
3101        return PCI_ERS_RESULT_NEED_RESET;
3102}
3103
3104static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3105{
3106        struct be_adapter *adapter = pci_get_drvdata(pdev);
3107        int status;
3108
3109        dev_info(&adapter->pdev->dev, "EEH reset\n");
3110        adapter->eeh_err = false;
3111
3112        status = pci_enable_device(pdev);
3113        if (status)
3114                return PCI_ERS_RESULT_DISCONNECT;
3115
3116        pci_set_master(pdev);
3117        pci_set_power_state(pdev, PCI_D0);
3118        pci_restore_state(pdev);
3119
3120        /* Check if card is ok and fw is ready */
3121        status = be_cmd_POST(adapter);
3122        if (status)
3123                return PCI_ERS_RESULT_DISCONNECT;
3124
3125        return PCI_ERS_RESULT_RECOVERED;
3126}
3127
3128static void be_eeh_resume(struct pci_dev *pdev)
3129{
3130        int status;
3131        struct be_adapter *adapter = pci_get_drvdata(pdev);
3132        struct net_device *netdev = adapter->netdev;
3133
3134        dev_info(&adapter->pdev->dev, "EEH resume\n");
3135
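            /* Save the freshly restored PCI state so a later reset starts from a known config */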
3136        pci_save_state(pdev);
3137
3138        /* tell fw we're ready to fire cmds */
3139        status = be_cmd_fw_init(adapter);
3140        if (status)
3141                goto err;
3142
3143        status = be_setup(adapter);
3144        if (status)
3145                goto err;
3146
3147        if (netif_running(netdev)) {
3148                status = be_open(netdev);
3149                if (status)
3150                        goto err;
3151        }
3152        netif_device_attach(netdev);
3153        return;
3154err:
3155        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3156}
3157
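    /* EEH error-recovery entry points: detect -> slot reset -> resume */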
3158static struct pci_error_handlers be_eeh_handlers = {
3159        .error_detected = be_eeh_err_detected,
3160        .slot_reset = be_eeh_reset,
3161        .resume = be_eeh_resume,
3162};
3163
3164static struct pci_driver be_driver = {
3165        .name = DRV_NAME,
3166        .id_table = be_dev_ids,
3167        .probe = be_probe,
3168        .remove = be_remove,
3169        .suspend = be_suspend,
3170        .resume = be_resume,
3171        .shutdown = be_shutdown,
3172        .err_handler = &be_eeh_handlers,
3173};
3174
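    /*
     * Validate module parameters at load time. Illustrative usage:
     *   modprobe be2net rx_frag_size=4096 num_vfs=4
     */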
3175static int __init be_init_module(void)
3176{
3177        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3178            rx_frag_size != 2048) {
3179                printk(KERN_WARNING DRV_NAME
3180                        " : Module param rx_frag_size must be 2048/4096/8192."
3181                        " Using 2048\n");
3182                rx_frag_size = 2048;
3183        }
3184
3185        if (num_vfs > 32) {
3186                printk(KERN_WARNING DRV_NAME
3187                        " : Module param num_vfs must not be greater than 32."
3188                        " Using 32\n");
3189                num_vfs = 32;
3190        }
3191
3192        return pci_register_driver(&be_driver);
3193}
3194module_init(be_init_module);
3195
3196static void __exit be_exit_module(void)
3197{
3198        pci_unregister_driver(&be_driver);
3199}
3200module_exit(be_exit_module);
3201