linux/drivers/net/ethernet/emulex/benet/be_main.c
   1/*
   2 * Copyright (C) 2005 - 2013 Emulex
   3 * All rights reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License version 2
   7 * as published by the Free Software Foundation.  The full GNU General
   8 * Public License is included in this distribution in the file called COPYING.
   9 *
  10 * Contact Information:
  11 * linux-drivers@emulex.com
  12 *
  13 * Emulex
  14 * 3333 Susan Street
  15 * Costa Mesa, CA 92626
  16 */
  17
  18#include <linux/prefetch.h>
  19#include <linux/module.h>
  20#include "be.h"
  21#include "be_cmds.h"
  22#include <asm/div64.h>
  23#include <linux/aer.h>
  24#include <linux/if_bridge.h>
  25
  26MODULE_VERSION(DRV_VER);
  28MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
  29MODULE_AUTHOR("Emulex Corporation");
  30MODULE_LICENSE("GPL");
  31
  32static unsigned int num_vfs;
  33module_param(num_vfs, uint, S_IRUGO);
  34MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
  35
  36static ushort rx_frag_size = 2048;
  37module_param(rx_frag_size, ushort, S_IRUGO);
  38MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
  39
   40static const struct pci_device_id be_dev_ids[] = {
  41        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
  42        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
  43        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
  44        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
  45        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
  46        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
  47        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
  48        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
  49        { 0 }
  50};
  51MODULE_DEVICE_TABLE(pci, be_dev_ids);
  52/* UE Status Low CSR */
  53static const char * const ue_status_low_desc[] = {
  54        "CEV",
  55        "CTX",
  56        "DBUF",
  57        "ERX",
  58        "Host",
  59        "MPU",
  60        "NDMA",
   61        "PTC",
   62        "RDMA",
   63        "RXF",
   64        "RXIPS",
   65        "RXULP0",
   66        "RXULP1",
   67        "RXULP2",
   68        "TIM",
   69        "TPOST",
   70        "TPRE",
   71        "TXIPS",
   72        "TXULP0",
   73        "TXULP1",
   74        "UC",
   75        "WDMA",
   76        "TXULP2",
   77        "HOST1",
   78        "P0_OB_LINK",
   79        "P1_OB_LINK",
   80        "HOST_GPIO",
   81        "MBOX",
  82        "AXGMAC0",
  83        "AXGMAC1",
  84        "JTAG",
  85        "MPU_INTPEND"
  86};
  87/* UE Status High CSR */
  88static const char * const ue_status_hi_desc[] = {
  89        "LPCMEMHOST",
  90        "MGMT_MAC",
  91        "PCS0ONLINE",
  92        "MPU_IRAM",
  93        "PCS1ONLINE",
  94        "PCTL0",
  95        "PCTL1",
  96        "PMEM",
  97        "RR",
  98        "TXPB",
  99        "RXPP",
 100        "XAUI",
 101        "TXP",
 102        "ARM",
 103        "IPC",
 104        "HOST2",
 105        "HOST3",
 106        "HOST4",
 107        "HOST5",
 108        "HOST6",
 109        "HOST7",
 110        "HOST8",
 111        "HOST9",
 112        "NETC",
 113        "Unknown",
 114        "Unknown",
 115        "Unknown",
 116        "Unknown",
 117        "Unknown",
 118        "Unknown",
 119        "Unknown",
 120        "Unknown"
 121};
 122
  123/* Is BE in a multi-channel mode */
  124static inline bool be_is_mc(struct be_adapter *adapter)
  125{
  126        return adapter->function_mode &
  127               (FLEX10_MODE | VNIC_MODE | UMC_ENABLED);
  128}
 129
 130static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 131{
 132        struct be_dma_mem *mem = &q->dma_mem;
 133        if (mem->va) {
 134                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
 135                                  mem->dma);
 136                mem->va = NULL;
 137        }
 138}
 139
 140static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
 141                u16 len, u16 entry_size)
 142{
 143        struct be_dma_mem *mem = &q->dma_mem;
 144
 145        memset(q, 0, sizeof(*q));
 146        q->len = len;
 147        q->entry_size = entry_size;
 148        mem->size = len * entry_size;
 149        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
 150                                      GFP_KERNEL);
 151        if (!mem->va)
 152                return -ENOMEM;
 153        return 0;
 154}
 155
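     /* Toggle the host interrupt enable bit directly via the PCICFG
      * MEMBAR control register; used as a fallback when the FW INTR_SET
      * cmd fails (see be_intr_set() below)
      */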
 156static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
 157{
 158        u32 reg, enabled;
 159
 160        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
 161                                &reg);
 162        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
 163
 164        if (!enabled && enable)
 165                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
 166        else if (enabled && !enable)
 167                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
 168        else
 169                return;
 170
 171        pci_write_config_dword(adapter->pdev,
 172                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
 173}
 174
 175static void be_intr_set(struct be_adapter *adapter, bool enable)
 176{
 177        int status = 0;
 178
  179        /* On Lancer, interrupts can't be controlled via this register */
 180        if (lancer_chip(adapter))
 181                return;
 182
 183        if (adapter->eeh_error)
 184                return;
 185
 186        status = be_cmd_intr_set(adapter, enable);
 187        if (status)
 188                be_reg_intr_set(adapter, enable);
 189}
 190
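     /* Ring the RQ doorbell: the low bits select the ring, the upper bits
      * carry the count of newly posted rx descriptors. The wmb() orders
      * the descriptor writes before the doorbell write. The TXQ/EQ/CQ
      * notify helpers below are analogous.
      */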
 191static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
 192{
 193        u32 val = 0;
 194        val |= qid & DB_RQ_RING_ID_MASK;
 195        val |= posted << DB_RQ_NUM_POSTED_SHIFT;
 196
 197        wmb();
 198        iowrite32(val, adapter->db + DB_RQ_OFFSET);
 199}
 200
 201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
 202                          u16 posted)
 203{
 204        u32 val = 0;
 205        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
 206        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
 207
 208        wmb();
 209        iowrite32(val, adapter->db + txo->db_offset);
 210}
 211
 212static void be_eq_notify(struct be_adapter *adapter, u16 qid,
 213                bool arm, bool clear_int, u16 num_popped)
 214{
 215        u32 val = 0;
 216        val |= qid & DB_EQ_RING_ID_MASK;
 217        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
 218                        DB_EQ_RING_ID_EXT_MASK_SHIFT);
 219
 220        if (adapter->eeh_error)
 221                return;
 222
 223        if (arm)
 224                val |= 1 << DB_EQ_REARM_SHIFT;
 225        if (clear_int)
 226                val |= 1 << DB_EQ_CLR_SHIFT;
 227        val |= 1 << DB_EQ_EVNT_SHIFT;
 228        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
 229        iowrite32(val, adapter->db + DB_EQ_OFFSET);
 230}
 231
 232void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
 233{
 234        u32 val = 0;
 235        val |= qid & DB_CQ_RING_ID_MASK;
 236        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
 237                        DB_CQ_RING_ID_EXT_MASK_SHIFT);
 238
 239        if (adapter->eeh_error)
 240                return;
 241
 242        if (arm)
 243                val |= 1 << DB_CQ_REARM_SHIFT;
 244        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
 245        iowrite32(val, adapter->db + DB_CQ_OFFSET);
 246}
 247
 248static int be_mac_addr_set(struct net_device *netdev, void *p)
 249{
 250        struct be_adapter *adapter = netdev_priv(netdev);
 251        struct device *dev = &adapter->pdev->dev;
 252        struct sockaddr *addr = p;
 253        int status;
 254        u8 mac[ETH_ALEN];
 255        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
 256
 257        if (!is_valid_ether_addr(addr->sa_data))
 258                return -EADDRNOTAVAIL;
 259
  260        /* The PMAC_ADD cmd may fail if the VF doesn't have the FILTMGMT
  261         * privilege or if the PF did not provision the new MAC address.
  262         * On BE3, this cmd always fails if the VF doesn't have the
  263         * FILTMGMT privilege. That failure is OK only if the PF has
  264         * programmed the MAC for the VF.
  265         */
 266        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
 267                                 adapter->if_handle, &adapter->pmac_id[0], 0);
 268        if (!status) {
 269                curr_pmac_id = adapter->pmac_id[0];
 270
 271                /* Delete the old programmed MAC. This call may fail if the
 272                 * old MAC was already deleted by the PF driver.
 273                 */
 274                if (adapter->pmac_id[0] != old_pmac_id)
 275                        be_cmd_pmac_del(adapter, adapter->if_handle,
 276                                        old_pmac_id, 0);
 277        }
 278
  279        /* Query the FW to decide whether the new MAC was actually
  280         * activated
  281         */
 282        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
 283        if (status)
 284                goto err;
 285
  286        /* The MAC change did not happen, either due to lack of privilege
  287         * or because the PF didn't pre-provision it
  288         */
 289        if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
 290                status = -EPERM;
 291                goto err;
 292        }
 293
 294        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 295        dev_info(dev, "MAC address changed to %pM\n", mac);
 296        return 0;
 297err:
 298        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
 299        return status;
 300}
 301
 302/* BE2 supports only v0 cmd */
 303static void *hw_stats_from_cmd(struct be_adapter *adapter)
 304{
 305        if (BE2_chip(adapter)) {
 306                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
 307
 308                return &cmd->hw_stats;
  309        } else {
 310                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
 311
 312                return &cmd->hw_stats;
 313        }
 314}
 315
 316/* BE2 supports only v0 cmd */
 317static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
 318{
 319        if (BE2_chip(adapter)) {
 320                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
 321
 322                return &hw_stats->erx;
 323        } else {
 324                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
 325
 326                return &hw_stats->erx;
 327        }
 328}
 329
 330static void populate_be_v0_stats(struct be_adapter *adapter)
 331{
 332        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
 333        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
 334        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
 335        struct be_port_rxf_stats_v0 *port_stats =
 336                                        &rxf_stats->port[adapter->port_num];
 337        struct be_drv_stats *drvs = &adapter->drv_stats;
 338
 339        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
 340        drvs->rx_pause_frames = port_stats->rx_pause_frames;
 341        drvs->rx_crc_errors = port_stats->rx_crc_errors;
 342        drvs->rx_control_frames = port_stats->rx_control_frames;
 343        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
 344        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
 345        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
 346        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
 347        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
 348        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
 349        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
 350        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
 351        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
 352        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
 353        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
 354        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
 355        drvs->rx_dropped_header_too_small =
 356                port_stats->rx_dropped_header_too_small;
 357        drvs->rx_address_filtered =
 358                                        port_stats->rx_address_filtered +
 359                                        port_stats->rx_vlan_filtered;
 360        drvs->rx_alignment_symbol_errors =
 361                port_stats->rx_alignment_symbol_errors;
 362
 363        drvs->tx_pauseframes = port_stats->tx_pauseframes;
 364        drvs->tx_controlframes = port_stats->tx_controlframes;
 365
 366        if (adapter->port_num)
 367                drvs->jabber_events = rxf_stats->port1_jabber_events;
 368        else
 369                drvs->jabber_events = rxf_stats->port0_jabber_events;
 370        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
 371        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
 372        drvs->forwarded_packets = rxf_stats->forwarded_packets;
 373        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
 374        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
 375        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
 376        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
 377}
 378
 379static void populate_be_v1_stats(struct be_adapter *adapter)
 380{
 381        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
 382        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
 383        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
 384        struct be_port_rxf_stats_v1 *port_stats =
 385                                        &rxf_stats->port[adapter->port_num];
 386        struct be_drv_stats *drvs = &adapter->drv_stats;
 387
 388        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
 389        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
 390        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
 391        drvs->rx_pause_frames = port_stats->rx_pause_frames;
 392        drvs->rx_crc_errors = port_stats->rx_crc_errors;
 393        drvs->rx_control_frames = port_stats->rx_control_frames;
 394        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
 395        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
 396        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
 397        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
 398        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
 399        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
 400        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
 401        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
 402        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
 403        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
 404        drvs->rx_dropped_header_too_small =
 405                port_stats->rx_dropped_header_too_small;
 406        drvs->rx_input_fifo_overflow_drop =
 407                port_stats->rx_input_fifo_overflow_drop;
 408        drvs->rx_address_filtered = port_stats->rx_address_filtered;
 409        drvs->rx_alignment_symbol_errors =
 410                port_stats->rx_alignment_symbol_errors;
 411        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
 412        drvs->tx_pauseframes = port_stats->tx_pauseframes;
 413        drvs->tx_controlframes = port_stats->tx_controlframes;
 414        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
 415        drvs->jabber_events = port_stats->jabber_events;
 416        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
 417        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
 418        drvs->forwarded_packets = rxf_stats->forwarded_packets;
 419        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
 420        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
 421        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
 422        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
 423}
 424
 425static void populate_lancer_stats(struct be_adapter *adapter)
 426{
 428        struct be_drv_stats *drvs = &adapter->drv_stats;
 429        struct lancer_pport_stats *pport_stats =
 430                                        pport_stats_from_cmd(adapter);
 431
 432        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
 433        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
 434        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
 435        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
 436        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
 437        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
 438        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
 439        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
 440        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
 441        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
 442        drvs->rx_dropped_tcp_length =
 443                                pport_stats->rx_dropped_invalid_tcp_length;
 444        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
 445        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
 446        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
 447        drvs->rx_dropped_header_too_small =
 448                                pport_stats->rx_dropped_header_too_small;
 449        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
 450        drvs->rx_address_filtered =
 451                                        pport_stats->rx_address_filtered +
 452                                        pport_stats->rx_vlan_filtered;
 453        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
 454        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
 455        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
 456        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
 457        drvs->jabber_events = pport_stats->rx_jabbers;
 458        drvs->forwarded_packets = pport_stats->num_forwards_lo;
 459        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
 460        drvs->rx_drops_too_many_frags =
 461                                pport_stats->rx_drops_too_many_frags_lo;
 462}
 463
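     /* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit
      * total: the low 16 bits of *acc mirror the HW value while the high
      * 16 bits count the wraps. E.g. acc = 0x0001FFF0 and val = 0x0005
      * implies one more wrap, yielding 0x00020005.
      */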
 464static void accumulate_16bit_val(u32 *acc, u16 val)
 465{
  466#define lo(x)                   ((x) & 0xFFFF)
  467#define hi(x)                   ((x) & 0xFFFF0000)
 468        bool wrapped = val < lo(*acc);
 469        u32 newacc = hi(*acc) + val;
 470
 471        if (wrapped)
 472                newacc += 65536;
 473        ACCESS_ONCE(*acc) = newacc;
 474}
 475
 476static void populate_erx_stats(struct be_adapter *adapter,
 477                        struct be_rx_obj *rxo,
 478                        u32 erx_stat)
 479{
 480        if (!BEx_chip(adapter))
 481                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
 482        else
  483                /* this erx HW counter wraps around after 65535; the
  484                 * driver accumulates it into a 32-bit value
  485                 */
 486                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
 487                                     (u16)erx_stat);
 488}
 489
 490void be_parse_stats(struct be_adapter *adapter)
 491{
 492        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
 493        struct be_rx_obj *rxo;
 494        int i;
 495        u32 erx_stat;
 496
 497        if (lancer_chip(adapter)) {
 498                populate_lancer_stats(adapter);
 499        } else {
 500                if (BE2_chip(adapter))
 501                        populate_be_v0_stats(adapter);
 502                else
 503                        /* for BE3 and Skyhawk */
 504                        populate_be_v1_stats(adapter);
 505
 506                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
 507                for_all_rx_queues(adapter, rxo, i) {
 508                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
 509                        populate_erx_stats(adapter, rxo, erx_stat);
 510                }
 511        }
 512}
 513
 514static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
 515                                        struct rtnl_link_stats64 *stats)
 516{
 517        struct be_adapter *adapter = netdev_priv(netdev);
 518        struct be_drv_stats *drvs = &adapter->drv_stats;
 519        struct be_rx_obj *rxo;
 520        struct be_tx_obj *txo;
 521        u64 pkts, bytes;
 522        unsigned int start;
 523        int i;
 524
 525        for_all_rx_queues(adapter, rxo, i) {
 526                const struct be_rx_stats *rx_stats = rx_stats(rxo);
 527                do {
 528                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
 529                        pkts = rx_stats(rxo)->rx_pkts;
 530                        bytes = rx_stats(rxo)->rx_bytes;
 531                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
 532                stats->rx_packets += pkts;
 533                stats->rx_bytes += bytes;
 534                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
 535                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
 536                                        rx_stats(rxo)->rx_drops_no_frags;
 537        }
 538
 539        for_all_tx_queues(adapter, txo, i) {
 540                const struct be_tx_stats *tx_stats = tx_stats(txo);
 541                do {
 542                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
 543                        pkts = tx_stats(txo)->tx_pkts;
 544                        bytes = tx_stats(txo)->tx_bytes;
 545                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
 546                stats->tx_packets += pkts;
 547                stats->tx_bytes += bytes;
 548        }
 549
 550        /* bad pkts received */
 551        stats->rx_errors = drvs->rx_crc_errors +
 552                drvs->rx_alignment_symbol_errors +
 553                drvs->rx_in_range_errors +
 554                drvs->rx_out_range_errors +
 555                drvs->rx_frame_too_long +
 556                drvs->rx_dropped_too_small +
 557                drvs->rx_dropped_too_short +
 558                drvs->rx_dropped_header_too_small +
 559                drvs->rx_dropped_tcp_length +
 560                drvs->rx_dropped_runt;
 561
 562        /* detailed rx errors */
 563        stats->rx_length_errors = drvs->rx_in_range_errors +
 564                drvs->rx_out_range_errors +
 565                drvs->rx_frame_too_long;
 566
 567        stats->rx_crc_errors = drvs->rx_crc_errors;
 568
 569        /* frame alignment errors */
 570        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
 571
 572        /* receiver fifo overrun */
  573        /* drops_no_pbuf is not per i/f, it's per BE card */
 574        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
 575                                drvs->rx_input_fifo_overflow_drop +
 576                                drvs->rx_drops_no_pbuf;
 577        return stats;
 578}
 579
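     /* Propagate a link status update to the netdev carrier state */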
 580void be_link_status_update(struct be_adapter *adapter, u8 link_status)
 581{
 582        struct net_device *netdev = adapter->netdev;
 583
 584        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
 585                netif_carrier_off(netdev);
 586                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
 587        }
 588
 589        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
 590                netif_carrier_on(netdev);
 591        else
 592                netif_carrier_off(netdev);
 593}
 594
 595static void be_tx_stats_update(struct be_tx_obj *txo,
 596                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
 597{
 598        struct be_tx_stats *stats = tx_stats(txo);
 599
 600        u64_stats_update_begin(&stats->sync);
 601        stats->tx_reqs++;
 602        stats->tx_wrbs += wrb_cnt;
 603        stats->tx_bytes += copied;
 604        stats->tx_pkts += (gso_segs ? gso_segs : 1);
 605        if (stopped)
 606                stats->tx_stops++;
 607        u64_stats_update_end(&stats->sync);
 608}
 609
 610/* Determine number of WRB entries needed to xmit data in an skb */
 611static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
 612                                                                bool *dummy)
 613{
 614        int cnt = (skb->len > skb->data_len);
 615
 616        cnt += skb_shinfo(skb)->nr_frags;
 617
 618        /* to account for hdr wrb */
 619        cnt++;
 620        if (lancer_chip(adapter) || !(cnt & 1)) {
 621                *dummy = false;
 622        } else {
 623                /* add a dummy to make it an even num */
 624                cnt++;
 625                *dummy = true;
 626        }
 627        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
 628        return cnt;
 629}
 630
 631static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
 632{
 633        wrb->frag_pa_hi = upper_32_bits(addr);
 634        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
 635        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
 636        wrb->rsvd0 = 0;
 637}
 638
 639static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
 640                                        struct sk_buff *skb)
 641{
 642        u8 vlan_prio;
 643        u16 vlan_tag;
 644
 645        vlan_tag = vlan_tx_tag_get(skb);
 646        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 647        /* If vlan priority provided by OS is NOT in available bmap */
 648        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
 649                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
 650                                adapter->recommended_prio;
 651
 652        return vlan_tag;
 653}
 654
 655static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 656                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
 657{
 658        u16 vlan_tag;
 659
 660        memset(hdr, 0, sizeof(*hdr));
 661
 662        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
 663
 664        if (skb_is_gso(skb)) {
 665                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
 666                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
 667                        hdr, skb_shinfo(skb)->gso_size);
 668                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
 669                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
 670        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 671                if (is_tcp_pkt(skb))
 672                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
 673                else if (is_udp_pkt(skb))
 674                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
 675        }
 676
 677        if (vlan_tx_tag_present(skb)) {
 678                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
 679                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
 680                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
 681        }
 682
 683        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
 684        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
 685        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
 686        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
 687        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
 688}
 689
 690static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
 691                bool unmap_single)
 692{
 693        dma_addr_t dma;
 694
 695        be_dws_le_to_cpu(wrb, sizeof(*wrb));
 696
 697        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
 698        if (wrb->frag_len) {
 699                if (unmap_single)
 700                        dma_unmap_single(dev, dma, wrb->frag_len,
 701                                         DMA_TO_DEVICE);
 702                else
 703                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
 704        }
 705}
 706
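     /* Build the WRB chain for an skb: a header WRB followed by one
      * fragment WRB for the linear part and for each page frag, plus an
      * optional dummy WRB. On a DMA mapping error, rewind the queue head
      * and unmap everything mapped so far.
      */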
 707static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
 708                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
 709                bool skip_hw_vlan)
 710{
 711        dma_addr_t busaddr;
 712        int i, copied = 0;
 713        struct device *dev = &adapter->pdev->dev;
 714        struct sk_buff *first_skb = skb;
 715        struct be_eth_wrb *wrb;
 716        struct be_eth_hdr_wrb *hdr;
 717        bool map_single = false;
 718        u16 map_head;
 719
 720        hdr = queue_head_node(txq);
 721        queue_head_inc(txq);
 722        map_head = txq->head;
 723
 724        if (skb->len > skb->data_len) {
 725                int len = skb_headlen(skb);
 726                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 727                if (dma_mapping_error(dev, busaddr))
 728                        goto dma_err;
 729                map_single = true;
 730                wrb = queue_head_node(txq);
 731                wrb_fill(wrb, busaddr, len);
 732                be_dws_cpu_to_le(wrb, sizeof(*wrb));
 733                queue_head_inc(txq);
 734                copied += len;
 735        }
 736
 737        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 738                const struct skb_frag_struct *frag =
 739                        &skb_shinfo(skb)->frags[i];
 740                busaddr = skb_frag_dma_map(dev, frag, 0,
 741                                           skb_frag_size(frag), DMA_TO_DEVICE);
 742                if (dma_mapping_error(dev, busaddr))
 743                        goto dma_err;
 744                wrb = queue_head_node(txq);
 745                wrb_fill(wrb, busaddr, skb_frag_size(frag));
 746                be_dws_cpu_to_le(wrb, sizeof(*wrb));
 747                queue_head_inc(txq);
 748                copied += skb_frag_size(frag);
 749        }
 750
 751        if (dummy_wrb) {
 752                wrb = queue_head_node(txq);
 753                wrb_fill(wrb, 0, 0);
 754                be_dws_cpu_to_le(wrb, sizeof(*wrb));
 755                queue_head_inc(txq);
 756        }
 757
 758        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
 759        be_dws_cpu_to_le(hdr, sizeof(*hdr));
 760
 761        return copied;
 762dma_err:
 763        txq->head = map_head;
 764        while (copied) {
 765                wrb = queue_head_node(txq);
 766                unmap_tx_frag(dev, wrb, map_single);
 767                map_single = false;
 768                copied -= wrb->frag_len;
 769                queue_head_inc(txq);
 770        }
 771        return 0;
 772}
 773
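     /* Insert the VLAN tag(s) into the packet data itself instead of
      * letting the HW tag it: first the inner tag (or pvid) and then, in
      * QnQ mode, the outer qnq_vid tag
      */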
 774static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
 775                                             struct sk_buff *skb,
 776                                             bool *skip_hw_vlan)
 777{
 778        u16 vlan_tag = 0;
 779
 780        skb = skb_share_check(skb, GFP_ATOMIC);
 781        if (unlikely(!skb))
 782                return skb;
 783
 784        if (vlan_tx_tag_present(skb))
 785                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
 786
 787        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
 788                if (!vlan_tag)
 789                        vlan_tag = adapter->pvid;
  790                /* Set skip_hw_vlan = 1 as a f/w workaround; it informs
  791                 * the F/W to skip VLAN insertion
  792                 */
 793                if (skip_hw_vlan)
 794                        *skip_hw_vlan = true;
 795        }
 796
 797        if (vlan_tag) {
 798                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 799                if (unlikely(!skb))
 800                        return skb;
 801                skb->vlan_tci = 0;
 802        }
 803
 804        /* Insert the outer VLAN, if any */
 805        if (adapter->qnq_vid) {
 806                vlan_tag = adapter->qnq_vid;
 807                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 808                if (unlikely(!skb))
 809                        return skb;
 810                if (skip_hw_vlan)
 811                        *skip_hw_vlan = true;
 812        }
 813
 814        return skb;
 815}
 816
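     /* Check for the ipv6 extension header pattern that triggers the Tx
      * stall worked around in be_xmit_workarounds() below
      */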
 817static bool be_ipv6_exthdr_check(struct sk_buff *skb)
 818{
 819        struct ethhdr *eh = (struct ethhdr *)skb->data;
 820        u16 offset = ETH_HLEN;
 821
 822        if (eh->h_proto == htons(ETH_P_IPV6)) {
 823                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
 824
 825                offset += sizeof(struct ipv6hdr);
 826                if (ip6h->nexthdr != NEXTHDR_TCP &&
 827                    ip6h->nexthdr != NEXTHDR_UDP) {
 828                        struct ipv6_opt_hdr *ehdr =
 829                                (struct ipv6_opt_hdr *) (skb->data + offset);
 830
 831                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
 832                        if (ehdr->hdrlen == 0xff)
 833                                return true;
 834                }
 835        }
 836        return false;
 837}
 838
 839static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
 840{
 841        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
 842}
 843
 844static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
 845                                struct sk_buff *skb)
 846{
 847        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
 848}
 849
 850static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
 851                                           struct sk_buff *skb,
 852                                           bool *skip_hw_vlan)
 853{
 854        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 855        unsigned int eth_hdr_len;
 856        struct iphdr *ip;
 857
  858        /* The Lancer and SH-R ASICs have a bug wherein packets of 32 bytes
  859         * or less may cause a transmit stall on that port. The work-around
  860         * is to pad such packets (<= 32 bytes) to a 36-byte length.
  861         */
 862        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
 863                if (skb_padto(skb, 36))
 864                        goto tx_drop;
 865                skb->len = 36;
 866        }
 867
  868        /* For padded packets, the BE HW modifies the tot_len field in the
  869         * IP header incorrectly when a VLAN tag is inserted by HW.
  870         * For padded packets, Lancer computes an incorrect checksum.
  871         */
 872        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
 873                                                VLAN_ETH_HLEN : ETH_HLEN;
 874        if (skb->len <= 60 &&
 875            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
 876            is_ipv4_pkt(skb)) {
  877                ip = ip_hdr(skb);
 878                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
 879        }
 880
 881        /* If vlan tag is already inlined in the packet, skip HW VLAN
 882         * tagging in UMC mode
 883         */
  884        if ((adapter->function_mode & UMC_ENABLED) &&
  885            veh->h_vlan_proto == htons(ETH_P_8021Q))
  886                *skip_hw_vlan = true;
 887
  888        /* HW has a bug wherein it will calculate CSUM for VLAN
  889         * pkts even though CSUM offload is disabled.
  890         * So manually insert the VLAN tag in the pkt.
  891         */
 892        if (skb->ip_summed != CHECKSUM_PARTIAL &&
 893            vlan_tx_tag_present(skb)) {
 894                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
 895                if (unlikely(!skb))
 896                        goto tx_drop;
 897        }
 898
  899        /* HW may lock up when VLAN HW tagging is requested on
  900         * certain ipv6 packets. Drop such pkts if the FW workaround
  901         * to skip HW tagging is not enabled.
  902         */
 903        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
 904            (adapter->pvid || adapter->qnq_vid) &&
 905            !qnq_async_evt_rcvd(adapter)))
 906                goto tx_drop;
 907
  908        /* Insert the VLAN tag manually to prevent an ASIC lockup
  909         * that occurs when the ASIC itself inserts a VLAN tag into
  910         * certain ipv6 packets. The driver inserts the tag and sets
  911         * the event, completion and vlan bits accordingly in the
  912         * Tx WRB.
  913         */
 914        if (be_ipv6_tx_stall_chk(adapter, skb) &&
 915            be_vlan_tag_tx_chk(adapter, skb)) {
 916                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
 917                if (unlikely(!skb))
 918                        goto tx_drop;
 919        }
 920
 921        return skb;
 922tx_drop:
 923        dev_kfree_skb_any(skb);
 924        return NULL;
 925}
 926
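     /* Main transmit entry point: apply the workarounds above, build the
      * WRB chain, stop the subqueue if it may not fit another max-sized
      * skb, and only then ring the Tx doorbell
      */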
 927static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 928{
 929        struct be_adapter *adapter = netdev_priv(netdev);
 930        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
 931        struct be_queue_info *txq = &txo->q;
 932        bool dummy_wrb, stopped = false;
 933        u32 wrb_cnt = 0, copied = 0;
 934        bool skip_hw_vlan = false;
 935        u32 start = txq->head;
 936
 937        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
 938        if (!skb)
 939                return NETDEV_TX_OK;
 940
 941        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 942
 943        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
 944                              skip_hw_vlan);
 945        if (copied) {
 946                int gso_segs = skb_shinfo(skb)->gso_segs;
 947
 948                /* record the sent skb in the sent_skb table */
 949                BUG_ON(txo->sent_skb_list[start]);
 950                txo->sent_skb_list[start] = skb;
 951
  952                /* Ensure txq has space for the next skb; else stop the queue
  953                 * *BEFORE* ringing the tx doorbell, so that we serialize the
  954                 * tx compls of the current transmit, which will wake the queue
  955                 */
 956                atomic_add(wrb_cnt, &txq->used);
 957                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
 958                                                                txq->len) {
 959                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
 960                        stopped = true;
 961                }
 962
 963                be_txq_notify(adapter, txo, wrb_cnt);
 964
 965                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
 966        } else {
 967                txq->head = start;
 968                dev_kfree_skb_any(skb);
 969        }
 970        return NETDEV_TX_OK;
 971}
 972
 973static int be_change_mtu(struct net_device *netdev, int new_mtu)
 974{
 975        struct be_adapter *adapter = netdev_priv(netdev);
 976        if (new_mtu < BE_MIN_MTU ||
 977                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
 978                                        (ETH_HLEN + ETH_FCS_LEN))) {
 979                dev_info(&adapter->pdev->dev,
 980                        "MTU must be between %d and %d bytes\n",
 981                        BE_MIN_MTU,
 982                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
 983                return -EINVAL;
 984        }
 985        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
 986                        netdev->mtu, new_mtu);
 987        netdev->mtu = new_mtu;
 988        return 0;
 989}
 990
 991/*
 992 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 993 * If the user configures more, place BE in vlan promiscuous mode.
 994 */
 995static int be_vid_config(struct be_adapter *adapter)
 996{
 997        u16 vids[BE_NUM_VLANS_SUPPORTED];
 998        u16 num = 0, i;
 999        int status = 0;
1000
1001        /* No need to further configure vids if in promiscuous mode */
1002        if (adapter->promiscuous)
1003                return 0;
1004
1005        if (adapter->vlans_added > be_max_vlans(adapter))
1006                goto set_vlan_promisc;
1007
1008        /* Construct VLAN Table to give to HW */
1009        for (i = 0; i < VLAN_N_VID; i++)
1010                if (adapter->vlan_tag[i])
1011                        vids[num++] = cpu_to_le16(i);
1012
1013        status = be_cmd_vlan_config(adapter, adapter->if_handle,
1014                                    vids, num, 1, 0);
1015
1016        if (status) {
1017                /* Set to VLAN promisc mode as setting VLAN filter failed */
1018                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1019                        goto set_vlan_promisc;
1020                dev_err(&adapter->pdev->dev,
1021                        "Setting HW VLAN filtering failed.\n");
1022        } else {
1023                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1024                        /* hw VLAN filtering re-enabled. */
1025                        status = be_cmd_rx_filter(adapter,
1026                                                  BE_FLAGS_VLAN_PROMISC, OFF);
1027                        if (!status) {
1028                                dev_info(&adapter->pdev->dev,
1029                                         "Disabling VLAN Promiscuous mode.\n");
1030                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1031                                dev_info(&adapter->pdev->dev,
 1032                                         "Re-enabling HW VLAN filtering\n");
1033                        }
1034                }
1035        }
1036
1037        return status;
1038
1039set_vlan_promisc:
1040        dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1041
1042        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1043        if (!status) {
 1044                dev_info(&adapter->pdev->dev, "Enabled VLAN Promiscuous mode\n");
1045                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1046                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1047        } else
1048                dev_err(&adapter->pdev->dev,
1049                        "Failed to enable VLAN Promiscuous mode.\n");
1050        return status;
1051}
1052
1053static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1054{
1055        struct be_adapter *adapter = netdev_priv(netdev);
1056        int status = 0;
 1057
1059        /* Packets with VID 0 are always received by Lancer by default */
1060        if (lancer_chip(adapter) && vid == 0)
1061                goto ret;
1062
1063        adapter->vlan_tag[vid] = 1;
1064        if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
1065                status = be_vid_config(adapter);
1066
1067        if (!status)
1068                adapter->vlans_added++;
1069        else
1070                adapter->vlan_tag[vid] = 0;
1071ret:
1072        return status;
1073}
1074
1075static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1076{
1077        struct be_adapter *adapter = netdev_priv(netdev);
1078        int status = 0;
1079
1080        /* Packets with VID 0 are always received by Lancer by default */
1081        if (lancer_chip(adapter) && vid == 0)
1082                goto ret;
1083
1084        adapter->vlan_tag[vid] = 0;
1085        if (adapter->vlans_added <= be_max_vlans(adapter))
1086                status = be_vid_config(adapter);
1087
1088        if (!status)
1089                adapter->vlans_added--;
1090        else
1091                adapter->vlan_tag[vid] = 1;
1092ret:
1093        return status;
1094}
1095
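     /* ndo_set_rx_mode handler: program the promisc/allmulti state and
      * the uc/mc address filters, falling back to the promisc modes when
      * the HW filter tables are exhausted
      */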
1096static void be_set_rx_mode(struct net_device *netdev)
1097{
1098        struct be_adapter *adapter = netdev_priv(netdev);
1099        int status;
1100
1101        if (netdev->flags & IFF_PROMISC) {
1102                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1103                adapter->promiscuous = true;
1104                goto done;
1105        }
1106
1107        /* BE was previously in promiscuous mode; disable it */
1108        if (adapter->promiscuous) {
1109                adapter->promiscuous = false;
1110                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1111
1112                if (adapter->vlans_added)
1113                        be_vid_config(adapter);
1114        }
1115
1116        /* Enable multicast promisc if num configured exceeds what we support */
1117        if (netdev->flags & IFF_ALLMULTI ||
1118            netdev_mc_count(netdev) > be_max_mc(adapter)) {
1119                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1120                goto done;
1121        }
1122
1123        if (netdev_uc_count(netdev) != adapter->uc_macs) {
1124                struct netdev_hw_addr *ha;
1125                int i = 1; /* First slot is claimed by the Primary MAC */
1126
1127                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1128                        be_cmd_pmac_del(adapter, adapter->if_handle,
1129                                        adapter->pmac_id[i], 0);
1130                }
1131
1132                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
1133                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1134                        adapter->promiscuous = true;
1135                        goto done;
1136                }
1137
1138                netdev_for_each_uc_addr(ha, adapter->netdev) {
1139                        adapter->uc_macs++; /* First slot is for Primary MAC */
1140                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1141                                        adapter->if_handle,
1142                                        &adapter->pmac_id[adapter->uc_macs], 0);
1143                }
1144        }
1145
1146        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1147
1148        /* Set to MCAST promisc mode if setting MULTICAST address fails */
1149        if (status) {
1150                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1151                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1152                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1153        }
1154done:
1155        return;
1156}
1157
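     /* ndo_set_vf_mac handler: on BEx chips the VF MAC is changed by
      * deleting and re-adding the pmac entry; other chips use
      * be_cmd_set_mac() instead
      */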
1158static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1159{
1160        struct be_adapter *adapter = netdev_priv(netdev);
1161        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1162        int status;
1163
1164        if (!sriov_enabled(adapter))
1165                return -EPERM;
1166
1167        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1168                return -EINVAL;
1169
1170        if (BEx_chip(adapter)) {
1171                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1172                                vf + 1);
1173
1174                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1175                                         &vf_cfg->pmac_id, vf + 1);
1176        } else {
1177                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1178                                        vf + 1);
1179        }
1180
1181        if (status)
 1182                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
1183                                mac, vf);
1184        else
1185                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1186
1187        return status;
1188}
1189
1190static int be_get_vf_config(struct net_device *netdev, int vf,
1191                        struct ifla_vf_info *vi)
1192{
1193        struct be_adapter *adapter = netdev_priv(netdev);
1194        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1195
1196        if (!sriov_enabled(adapter))
1197                return -EPERM;
1198
1199        if (vf >= adapter->num_vfs)
1200                return -EINVAL;
1201
1202        vi->vf = vf;
1203        vi->tx_rate = vf_cfg->tx_rate;
1204        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1205        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1206        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1207
1208        return 0;
1209}
1210
1211static int be_set_vf_vlan(struct net_device *netdev,
1212                        int vf, u16 vlan, u8 qos)
1213{
1214        struct be_adapter *adapter = netdev_priv(netdev);
1215        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1216        int status = 0;
1217
1218        if (!sriov_enabled(adapter))
1219                return -EPERM;
1220
1221        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1222                return -EINVAL;
1223
1224        if (vlan || qos) {
1225                vlan |= qos << VLAN_PRIO_SHIFT;
1226                if (vf_cfg->vlan_tag != vlan) {
 1227                        /* If this is a new value, program it. Else skip. */
1228                        vf_cfg->vlan_tag = vlan;
1229                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1230                                                       vf_cfg->if_handle, 0);
1231                }
1232        } else {
1233                /* Reset Transparent Vlan Tagging. */
1234                vf_cfg->vlan_tag = 0;
1235                vlan = vf_cfg->def_vid;
1236                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1237                                               vf_cfg->if_handle, 0);
1238        }
 1239
1241        if (status)
1242                dev_info(&adapter->pdev->dev,
1243                                "VLAN %d config on VF %d failed\n", vlan, vf);
1244        return status;
1245}
1246
1247static int be_set_vf_tx_rate(struct net_device *netdev,
1248                        int vf, int rate)
1249{
1250        struct be_adapter *adapter = netdev_priv(netdev);
1251        int status = 0;
1252
1253        if (!sriov_enabled(adapter))
1254                return -EPERM;
1255
1256        if (vf >= adapter->num_vfs)
1257                return -EINVAL;
1258
1259        if (rate < 100 || rate > 10000) {
1260                dev_err(&adapter->pdev->dev,
1261                        "tx rate must be between 100 and 10000 Mbps\n");
1262                return -EINVAL;
1263        }
1264
1265        if (lancer_chip(adapter))
1266                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1267        else
1268                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1269
1270        if (status)
1271                dev_err(&adapter->pdev->dev,
1272                                "tx rate %d on VF %d failed\n", rate, vf);
1273        else
1274                adapter->vf_cfg[vf].tx_rate = rate;
1275        return status;
1276}
1277
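     /* Adaptive interrupt coalescing: once per second, derive the rx pkt
      * rate of this EQ's rx queue and scale the EQ delay (eqd)
      * accordingly, clamped to [min_eqd, max_eqd]
      */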
 1278static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
 1279{
 1280        struct be_rx_stats *stats;
 1281        ulong now = jiffies, delta;
 1282        u64 pkts;
 1283        unsigned int start, eqd;
 1284
 1285        if (!eqo->enable_aic) {
 1286                eqd = eqo->eqd;
 1287                goto modify_eqd;
 1288        }
 1289
 1290        if (eqo->idx >= adapter->num_rx_qs)
 1291                return;
 1292
 1293        stats = rx_stats(&adapter->rx_obj[eqo->idx]);
 1294        delta = now - stats->rx_jiffies;
1295
1296        /* Wrapped around */
1297        if (time_before(now, stats->rx_jiffies)) {
1298                stats->rx_jiffies = now;
1299                return;
1300        }
1301
1302        /* Update once a second */
1303        if (delta < HZ)
1304                return;
1305
1306        do {
1307                start = u64_stats_fetch_begin_bh(&stats->sync);
1308                pkts = stats->rx_pkts;
1309        } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1310
1311        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1312        stats->rx_pkts_prev = pkts;
1313        stats->rx_jiffies = now;
1314        eqd = (stats->rx_pps / 110000) << 3;
1315        eqd = min(eqd, eqo->max_eqd);
1316        eqd = max(eqd, eqo->min_eqd);
1317        if (eqd < 10)
1318                eqd = 0;
1319
1320modify_eqd:
1321        if (eqd != eqo->cur_eqd) {
1322                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1323                eqo->cur_eqd = eqd;
1324        }
1325}
1326
1327static void be_rx_stats_update(struct be_rx_obj *rxo,
1328                struct be_rx_compl_info *rxcp)
1329{
1330        struct be_rx_stats *stats = rx_stats(rxo);
1331
1332        u64_stats_update_begin(&stats->sync);
1333        stats->rx_compl++;
1334        stats->rx_bytes += rxcp->pkt_size;
1335        stats->rx_pkts++;
1336        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1337                stats->rx_mcast_pkts++;
1338        if (rxcp->err)
1339                stats->rx_compl_err++;
1340        u64_stats_update_end(&stats->sync);
1341}
1342
1343static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1344{
1345        /* L4 checksum is not reliable for non TCP/UDP packets.
1346         * Also ignore ipcksm for ipv6 pkts */
1347        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1348                                (rxcp->ip_csum || rxcp->ipv6);
1349}
1350
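     /* Return the page_info for a completed rx frag; unmap the backing
      * page once its last user frag has been consumed
      */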
1351static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1352                                                u16 frag_idx)
1353{
1354        struct be_adapter *adapter = rxo->adapter;
1355        struct be_rx_page_info *rx_page_info;
1356        struct be_queue_info *rxq = &rxo->q;
1357
1358        rx_page_info = &rxo->page_info_tbl[frag_idx];
1359        BUG_ON(!rx_page_info->page);
1360
1361        if (rx_page_info->last_page_user) {
1362                dma_unmap_page(&adapter->pdev->dev,
1363                               dma_unmap_addr(rx_page_info, bus),
1364                               adapter->big_page_size, DMA_FROM_DEVICE);
1365                rx_page_info->last_page_user = false;
1366        }
1367
1368        atomic_dec(&rxq->used);
1369        return rx_page_info;
1370}
1371
 1372/* Throw away the data in the Rx completion */
1373static void be_rx_compl_discard(struct be_rx_obj *rxo,
1374                                struct be_rx_compl_info *rxcp)
1375{
1376        struct be_queue_info *rxq = &rxo->q;
1377        struct be_rx_page_info *page_info;
1378        u16 i, num_rcvd = rxcp->num_rcvd;
1379
1380        for (i = 0; i < num_rcvd; i++) {
1381                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1382                put_page(page_info->page);
1383                memset(page_info, 0, sizeof(*page_info));
1384                index_inc(&rxcp->rxq_idx, rxq->len);
1385        }
1386}
1387
1388/*
1389 * skb_fill_rx_data forms a complete skb for an ether frame
1390 * indicated by rxcp.
1391 */
1392static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1393                             struct be_rx_compl_info *rxcp)
1394{
1395        struct be_queue_info *rxq = &rxo->q;
1396        struct be_rx_page_info *page_info;
1397        u16 i, j;
1398        u16 hdr_len, curr_frag_len, remaining;
1399        u8 *start;
1400
1401        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1402        start = page_address(page_info->page) + page_info->page_offset;
1403        prefetch(start);
1404
1405        /* Copy data in the first descriptor of this completion */
1406        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1407
1408        skb->len = curr_frag_len;
1409        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1410                memcpy(skb->data, start, curr_frag_len);
1411                /* Complete packet has now been moved to data */
1412                put_page(page_info->page);
1413                skb->data_len = 0;
1414                skb->tail += curr_frag_len;
1415        } else {
1416                hdr_len = ETH_HLEN;
1417                memcpy(skb->data, start, hdr_len);
1418                skb_shinfo(skb)->nr_frags = 1;
1419                skb_frag_set_page(skb, 0, page_info->page);
1420                skb_shinfo(skb)->frags[0].page_offset =
1421                                        page_info->page_offset + hdr_len;
1422                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1423                skb->data_len = curr_frag_len - hdr_len;
1424                skb->truesize += rx_frag_size;
1425                skb->tail += hdr_len;
1426        }
1427        page_info->page = NULL;
1428
1429        if (rxcp->pkt_size <= rx_frag_size) {
1430                BUG_ON(rxcp->num_rcvd != 1);
1431                return;
1432        }
1433
1434        /* More frags present for this completion */
1435        index_inc(&rxcp->rxq_idx, rxq->len);
1436        remaining = rxcp->pkt_size - curr_frag_len;
1437        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1438                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1439                curr_frag_len = min(remaining, rx_frag_size);
1440
1441                /* Coalesce all frags from the same physical page in one slot */
1442                if (page_info->page_offset == 0) {
1443                        /* Fresh page */
1444                        j++;
1445                        skb_frag_set_page(skb, j, page_info->page);
1446                        skb_shinfo(skb)->frags[j].page_offset =
1447                                                        page_info->page_offset;
1448                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1449                        skb_shinfo(skb)->nr_frags++;
1450                } else {
1451                        put_page(page_info->page);
1452                }
1453
1454                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1455                skb->len += curr_frag_len;
1456                skb->data_len += curr_frag_len;
1457                skb->truesize += rx_frag_size;
1458                remaining -= curr_frag_len;
1459                index_inc(&rxcp->rxq_idx, rxq->len);
1460                page_info->page = NULL;
1461        }
1462        BUG_ON(j > MAX_SKB_FRAGS);
1463}
1464
1465/* Process the RX completion indicated by rxcp when GRO is disabled */
1466static void be_rx_compl_process(struct be_rx_obj *rxo,
1467                                struct be_rx_compl_info *rxcp)
1468{
1469        struct be_adapter *adapter = rxo->adapter;
1470        struct net_device *netdev = adapter->netdev;
1471        struct sk_buff *skb;
1472
1473        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1474        if (unlikely(!skb)) {
1475                rx_stats(rxo)->rx_drops_no_skbs++;
1476                be_rx_compl_discard(rxo, rxcp);
1477                return;
1478        }
1479
1480        skb_fill_rx_data(rxo, skb, rxcp);
1481
1482        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1483                skb->ip_summed = CHECKSUM_UNNECESSARY;
1484        else
1485                skb_checksum_none_assert(skb);
1486
1487        skb->protocol = eth_type_trans(skb, netdev);
1488        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1489        if (netdev->features & NETIF_F_RXHASH)
1490                skb->rxhash = rxcp->rss_hash;
1491
1493        if (rxcp->vlanf)
1494                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1495
1496        netif_receive_skb(skb);
1497}
1498
1499/* Process the RX completion indicated by rxcp when GRO is enabled */
1500static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1501                                    struct napi_struct *napi,
1502                                    struct be_rx_compl_info *rxcp)
1503{
1504        struct be_adapter *adapter = rxo->adapter;
1505        struct be_rx_page_info *page_info;
1506        struct sk_buff *skb = NULL;
1507        struct be_queue_info *rxq = &rxo->q;
1508        u16 remaining, curr_frag_len;
1509        u16 i, j;
1510
1511        skb = napi_get_frags(napi);
1512        if (!skb) {
1513                be_rx_compl_discard(rxo, rxcp);
1514                return;
1515        }
1516
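            /* j starts at -1 so that the first fragment (i == 0) bumps it
             * to slot 0 of the frags[] array */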
1517        remaining = rxcp->pkt_size;
1518        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1519                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1520
1521                curr_frag_len = min(remaining, rx_frag_size);
1522
1523                /* Coalesce all frags from the same physical page in one slot */
1524                if (i == 0 || page_info->page_offset == 0) {
1525                        /* First frag or Fresh page */
1526                        j++;
1527                        skb_frag_set_page(skb, j, page_info->page);
1528                        skb_shinfo(skb)->frags[j].page_offset =
1529                                                        page_info->page_offset;
1530                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1531                } else {
1532                        put_page(page_info->page);
1533                }
1534                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1535                skb->truesize += rx_frag_size;
1536                remaining -= curr_frag_len;
1537                index_inc(&rxcp->rxq_idx, rxq->len);
1538                memset(page_info, 0, sizeof(*page_info));
1539        }
1540        BUG_ON(j > MAX_SKB_FRAGS);
1541
1542        skb_shinfo(skb)->nr_frags = j + 1;
1543        skb->len = rxcp->pkt_size;
1544        skb->data_len = rxcp->pkt_size;
1545        skb->ip_summed = CHECKSUM_UNNECESSARY;
1546        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1547        if (adapter->netdev->features & NETIF_F_RXHASH)
1548                skb->rxhash = rxcp->rss_hash;
1549
1550        if (rxcp->vlanf)
1551                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1552
1553        napi_gro_frags(napi);
1554}
1555
1556static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1557                                 struct be_rx_compl_info *rxcp)
1558{
1559        rxcp->pkt_size =
1560                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1561        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1562        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1563        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1564        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1565        rxcp->ip_csum =
1566                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1567        rxcp->l4_csum =
1568                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1569        rxcp->ipv6 =
1570                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1571        rxcp->rxq_idx =
1572                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1573        rxcp->num_rcvd =
1574                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1575        rxcp->pkt_type =
1576                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1577        rxcp->rss_hash =
1578                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1579        if (rxcp->vlanf) {
1580                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1581                                          compl);
1582                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1583                                               compl);
1584        }
1585        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1586}
1587
1588static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1589                                 struct be_rx_compl_info *rxcp)
1590{
1591        rxcp->pkt_size =
1592                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1593        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1594        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1595        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1596        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1597        rxcp->ip_csum =
1598                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1599        rxcp->l4_csum =
1600                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1601        rxcp->ipv6 =
1602                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1603        rxcp->rxq_idx =
1604                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1605        rxcp->num_rcvd =
1606                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1607        rxcp->pkt_type =
1608                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1609        rxcp->rss_hash =
1610                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1611        if (rxcp->vlanf) {
1612                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1613                                          compl);
1614                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1615                                               compl);
1616        }
1617        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1618        rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1619                                      ip_frag, compl);
1620}
1621
1622static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1623{
1624        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1625        struct be_rx_compl_info *rxcp = &rxo->rxcp;
1626        struct be_adapter *adapter = rxo->adapter;
1627
1628        /* For checking the valid bit it is OK to use either definition, as
1629         * the valid bit is at the same position in both v0 and v1 Rx compls */
1630        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1631                return NULL;
1632
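            /* Read the rest of the completion only after the valid bit
             * has been observed */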
1633        rmb();
1634        be_dws_le_to_cpu(compl, sizeof(*compl));
1635
1636        if (adapter->be3_native)
1637                be_parse_rx_compl_v1(compl, rxcp);
1638        else
1639                be_parse_rx_compl_v0(compl, rxcp);
1640
1641        if (rxcp->ip_frag)
1642                rxcp->l4_csum = 0;
1643
1644        if (rxcp->vlanf) {
1645                /* vlanf could be wrongly set in some cards.
1646                 * Ignore it if vtm is not set */
1647                if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1648                        rxcp->vlanf = 0;
1649
1650                if (!lancer_chip(adapter))
1651                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1652
1653                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1654                    !adapter->vlan_tag[rxcp->vlan_tag])
1655                        rxcp->vlanf = 0;
1656        }
1657
1658        /* As the compl has been parsed, reset it; we won't touch it again */
1659        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1660
1661        queue_tail_inc(&rxo->cq);
1662        return rxcp;
1663}
1664
1665static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1666{
1667        u32 order = get_order(size);
1668
1669        if (order > 0)
1670                gfp |= __GFP_COMP;
1671        return  alloc_pages(gfp, order);
1672}
1673
1674/*
1675 * Allocate a page, split it into fragments of size rx_frag_size and post
1676 * them as receive buffers to BE
1677 */
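    /* For example, with the default rx_frag_size of 2048 and (assuming)
     * 4K pages, big_page_size works out to 4096, so each allocated page
     * is split into two fragments and posted as two RX descriptors.
     */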
1678static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1679{
1680        struct be_adapter *adapter = rxo->adapter;
1681        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1682        struct be_queue_info *rxq = &rxo->q;
1683        struct page *pagep = NULL;
1684        struct be_eth_rx_d *rxd;
1685        u64 page_dmaaddr = 0, frag_dmaaddr;
1686        u32 posted, page_offset = 0;
1687
1688        page_info = &rxo->page_info_tbl[rxq->head];
1689        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1690                if (!pagep) {
1691                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
1692                        if (unlikely(!pagep)) {
1693                                rx_stats(rxo)->rx_post_fail++;
1694                                break;
1695                        }
1696                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1697                                                    0, adapter->big_page_size,
1698                                                    DMA_FROM_DEVICE);
1699                        page_info->page_offset = 0;
1700                } else {
1701                        get_page(pagep);
1702                        page_info->page_offset = page_offset + rx_frag_size;
1703                }
1704                page_offset = page_info->page_offset;
1705                page_info->page = pagep;
1706                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1707                frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1708
1709                rxd = queue_head_node(rxq);
1710                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1711                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1712
1713                /* Any space left in the current big page for another frag? */
1714                if ((page_offset + rx_frag_size + rx_frag_size) >
1715                                        adapter->big_page_size) {
1716                        pagep = NULL;
1717                        page_info->last_page_user = true;
1718                }
1719
1720                prev_page_info = page_info;
1721                queue_head_inc(rxq);
1722                page_info = &rxo->page_info_tbl[rxq->head];
1723        }
1724        if (pagep)
1725                prev_page_info->last_page_user = true;
1726
1727        if (posted) {
1728                atomic_add(posted, &rxq->used);
1729                be_rxq_notify(adapter, rxq->id, posted);
1730        } else if (atomic_read(&rxq->used) == 0) {
1731                /* Let be_worker replenish when memory is available */
1732                rxo->rx_post_starved = true;
1733        }
1734}
1735
1736static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1737{
1738        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1739
1740        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1741                return NULL;
1742
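            /* Ensure the completion body is read only after its valid bit */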
1743        rmb();
1744        be_dws_le_to_cpu(txcp, sizeof(*txcp));
1745
1746        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1747
1748        queue_tail_inc(tx_cq);
1749        return txcp;
1750}
1751
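    /* Reclaims the WRBs of one completed TX request: skips the header WRB
     * at the queue tail (already counted in num_wrbs), unmaps each fragment
     * WRB up to last_index, frees the skb and returns the WRB count.
     */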
1752static u16 be_tx_compl_process(struct be_adapter *adapter,
1753                struct be_tx_obj *txo, u16 last_index)
1754{
1755        struct be_queue_info *txq = &txo->q;
1756        struct be_eth_wrb *wrb;
1757        struct sk_buff **sent_skbs = txo->sent_skb_list;
1758        struct sk_buff *sent_skb;
1759        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1760        bool unmap_skb_hdr = true;
1761
1762        sent_skb = sent_skbs[txq->tail];
1763        BUG_ON(!sent_skb);
1764        sent_skbs[txq->tail] = NULL;
1765
1766        /* skip header wrb */
1767        queue_tail_inc(txq);
1768
1769        do {
1770                cur_index = txq->tail;
1771                wrb = queue_tail_node(txq);
1772                unmap_tx_frag(&adapter->pdev->dev, wrb,
1773                              (unmap_skb_hdr && skb_headlen(sent_skb)));
1774                unmap_skb_hdr = false;
1775
1776                num_wrbs++;
1777                queue_tail_inc(txq);
1778        } while (cur_index != last_index);
1779
1780        kfree_skb(sent_skb);
1781        return num_wrbs;
1782}
1783
1784/* Drain the event queue and return the number of events consumed */
1785static inline int events_get(struct be_eq_obj *eqo)
1786{
1787        struct be_eq_entry *eqe;
1788        int num = 0;
1789
1790        do {
1791                eqe = queue_tail_node(&eqo->q);
1792                if (eqe->evt == 0)
1793                        break;
1794
1795                rmb();
1796                eqe->evt = 0;
1797                num++;
1798                queue_tail_inc(&eqo->q);
1799        } while (true);
1800
1801        return num;
1802}
1803
1804/* Leaves the EQ in a disarmed state */
1805static void be_eq_clean(struct be_eq_obj *eqo)
1806{
1807        int num = events_get(eqo);
1808
1809        be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1810}
1811
1812static void be_rx_cq_clean(struct be_rx_obj *rxo)
1813{
1814        struct be_rx_page_info *page_info;
1815        struct be_queue_info *rxq = &rxo->q;
1816        struct be_queue_info *rx_cq = &rxo->cq;
1817        struct be_rx_compl_info *rxcp;
1818        struct be_adapter *adapter = rxo->adapter;
1819        int flush_wait = 0;
1820        u16 tail;
1821
1822        /* Consume pending rx completions.
1823         * Wait for the flush completion (identified by zero num_rcvd)
1824         * to arrive. Notify CQ even when there are no more CQ entries
1825         * for HW to flush partially coalesced CQ entries.
1826         * In Lancer, there is no need to wait for flush compl.
1827         */
1828        for (;;) {
1829                rxcp = be_rx_compl_get(rxo);
1830                if (rxcp == NULL) {
1831                        if (lancer_chip(adapter))
1832                                break;
1833
1834                        if (flush_wait++ > 10 || be_hw_error(adapter)) {
1835                                dev_warn(&adapter->pdev->dev,
1836                                         "did not receive flush compl\n");
1837                                break;
1838                        }
1839                        be_cq_notify(adapter, rx_cq->id, true, 0);
1840                        mdelay(1);
1841                } else {
1842                        be_rx_compl_discard(rxo, rxcp);
1843                        be_cq_notify(adapter, rx_cq->id, false, 1);
1844                        if (rxcp->num_rcvd == 0)
1845                                break;
1846                }
1847        }
1848
1849        /* After cleanup, leave the CQ in unarmed state */
1850        be_cq_notify(adapter, rx_cq->id, false, 0);
1851
1852        /* Then free posted rx buffers that were not used */
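            /* 'used' buffers are still posted; the oldest of them sits
             * 'used' slots behind the current head, which is what the
             * modular arithmetic below computes */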
1853        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1854        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1855                page_info = get_rx_page_info(rxo, tail);
1856                put_page(page_info->page);
1857                memset(page_info, 0, sizeof(*page_info));
1858        }
1859        BUG_ON(atomic_read(&rxq->used));
1860        rxq->tail = rxq->head = 0;
1861}
1862
1863static void be_tx_compl_clean(struct be_adapter *adapter)
1864{
1865        struct be_tx_obj *txo;
1866        struct be_queue_info *txq;
1867        struct be_eth_tx_compl *txcp;
1868        u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1869        struct sk_buff *sent_skb;
1870        bool dummy_wrb;
1871        int i, pending_txqs;
1872
1873        /* Wait for a max of 200ms for all the tx-completions to arrive. */
1874        do {
1875                pending_txqs = adapter->num_tx_qs;
1876
1877                for_all_tx_queues(adapter, txo, i) {
1878                        txq = &txo->q;
1879                        while ((txcp = be_tx_compl_get(&txo->cq))) {
1880                                end_idx =
1881                                        AMAP_GET_BITS(struct amap_eth_tx_compl,
1882                                                      wrb_index, txcp);
1883                                num_wrbs += be_tx_compl_process(adapter, txo,
1884                                                                end_idx);
1885                                cmpl++;
1886                        }
1887                        if (cmpl) {
1888                                be_cq_notify(adapter, txo->cq.id, false, cmpl);
1889                                atomic_sub(num_wrbs, &txq->used);
1890                                cmpl = 0;
1891                                num_wrbs = 0;
1892                        }
1893                        if (atomic_read(&txq->used) == 0)
1894                                pending_txqs--;
1895                }
1896
1897                if (pending_txqs == 0 || ++timeo > 200)
1898                        break;
1899
1900                mdelay(1);
1901        } while (true);
1902
1903        for_all_tx_queues(adapter, txo, i) {
1904                txq = &txo->q;
1905                if (atomic_read(&txq->used))
1906                        dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1907                                atomic_read(&txq->used));
1908
1909                /* free posted tx for which compls will never arrive */
1910                while (atomic_read(&txq->used)) {
1911                        sent_skb = txo->sent_skb_list[txq->tail];
1912                        end_idx = txq->tail;
1913                        num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1914                                                   &dummy_wrb);
1915                        index_adv(&end_idx, num_wrbs - 1, txq->len);
1916                        num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1917                        atomic_sub(num_wrbs, &txq->used);
1918                }
1919        }
1920}
1921
1922static void be_evt_queues_destroy(struct be_adapter *adapter)
1923{
1924        struct be_eq_obj *eqo;
1925        int i;
1926
1927        for_all_evt_queues(adapter, eqo, i) {
1928                if (eqo->q.created) {
1929                        be_eq_clean(eqo);
1930                        be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1931                        netif_napi_del(&eqo->napi);
1932                }
1933                be_queue_free(adapter, &eqo->q);
1934        }
1935}
1936
1937static int be_evt_queues_create(struct be_adapter *adapter)
1938{
1939        struct be_queue_info *eq;
1940        struct be_eq_obj *eqo;
1941        int i, rc;
1942
1943        adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
1944                                    adapter->cfg_num_qs);
1945
1946        for_all_evt_queues(adapter, eqo, i) {
1947                netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
1948                               BE_NAPI_WEIGHT);
1949                eqo->adapter = adapter;
1950                eqo->tx_budget = BE_TX_BUDGET;
1951                eqo->idx = i;
1952                eqo->max_eqd = BE_MAX_EQD;
1953                eqo->enable_aic = true;
1954
1955                eq = &eqo->q;
1956                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1957                                        sizeof(struct be_eq_entry));
1958                if (rc)
1959                        return rc;
1960
1961                rc = be_cmd_eq_create(adapter, eqo);
1962                if (rc)
1963                        return rc;
1964        }
1965        return 0;
1966}
1967
1968static void be_mcc_queues_destroy(struct be_adapter *adapter)
1969{
1970        struct be_queue_info *q;
1971
1972        q = &adapter->mcc_obj.q;
1973        if (q->created)
1974                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1975        be_queue_free(adapter, q);
1976
1977        q = &adapter->mcc_obj.cq;
1978        if (q->created)
1979                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1980        be_queue_free(adapter, q);
1981}
1982
1983/* Must be called only after TX qs are created as MCC shares TX EQ */
1984static int be_mcc_queues_create(struct be_adapter *adapter)
1985{
1986        struct be_queue_info *q, *cq;
1987
1988        cq = &adapter->mcc_obj.cq;
1989        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1990                        sizeof(struct be_mcc_compl)))
1991                goto err;
1992
1993        /* Use the default EQ for MCC completions */
1994        if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1995                goto mcc_cq_free;
1996
1997        q = &adapter->mcc_obj.q;
1998        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1999                goto mcc_cq_destroy;
2000
2001        if (be_cmd_mccq_create(adapter, q, cq))
2002                goto mcc_q_free;
2003
2004        return 0;
2005
2006mcc_q_free:
2007        be_queue_free(adapter, q);
2008mcc_cq_destroy:
2009        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2010mcc_cq_free:
2011        be_queue_free(adapter, cq);
2012err:
2013        return -1;
2014}
2015
2016static void be_tx_queues_destroy(struct be_adapter *adapter)
2017{
2018        struct be_queue_info *q;
2019        struct be_tx_obj *txo;
2020        u8 i;
2021
2022        for_all_tx_queues(adapter, txo, i) {
2023                q = &txo->q;
2024                if (q->created)
2025                        be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2026                be_queue_free(adapter, q);
2027
2028                q = &txo->cq;
2029                if (q->created)
2030                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2031                be_queue_free(adapter, q);
2032        }
2033}
2034
2035static int be_tx_qs_create(struct be_adapter *adapter)
2036{
2037        struct be_queue_info *cq, *eq;
2038        struct be_tx_obj *txo;
2039        int status, i;
2040
2041        adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2042
2043        for_all_tx_queues(adapter, txo, i) {
2044                cq = &txo->cq;
2045                status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2046                                        sizeof(struct be_eth_tx_compl));
2047                if (status)
2048                        return status;
2049
2050                /* If num_evt_qs is less than num_tx_qs, then more than
2051                 * one TXQ shares an EQ
2052                 */
2053                eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2054                status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2055                if (status)
2056                        return status;
2057
2058                status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2059                                        sizeof(struct be_eth_wrb));
2060                if (status)
2061                        return status;
2062
2063                status = be_cmd_txq_create(adapter, txo);
2064                if (status)
2065                        return status;
2066        }
2067
2068        dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2069                 adapter->num_tx_qs);
2070        return 0;
2071}
2072
2073static void be_rx_cqs_destroy(struct be_adapter *adapter)
2074{
2075        struct be_queue_info *q;
2076        struct be_rx_obj *rxo;
2077        int i;
2078
2079        for_all_rx_queues(adapter, rxo, i) {
2080                q = &rxo->cq;
2081                if (q->created)
2082                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2083                be_queue_free(adapter, q);
2084        }
2085}
2086
2087static int be_rx_cqs_create(struct be_adapter *adapter)
2088{
2089        struct be_queue_info *eq, *cq;
2090        struct be_rx_obj *rxo;
2091        int rc, i;
2092
2093        /* We can create as many RSS rings as there are EQs. */
2094        adapter->num_rx_qs = adapter->num_evt_qs;
2095
2096        /* We'll use RSS only if at least 2 RSS rings are supported.
2097         * When RSS is used, we'll need a default RXQ for non-IP traffic.
2098         */
2099        if (adapter->num_rx_qs > 1)
2100                adapter->num_rx_qs++;
2101
2102        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2103        for_all_rx_queues(adapter, rxo, i) {
2104                rxo->adapter = adapter;
2105                cq = &rxo->cq;
2106                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2107                                sizeof(struct be_eth_rx_compl));
2108                if (rc)
2109                        return rc;
2110
2111                eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2112                rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2113                if (rc)
2114                        return rc;
2115        }
2116
2117        dev_info(&adapter->pdev->dev,
2118                 "created %d RSS queue(s) and 1 default RX queue\n",
2119                 adapter->num_rx_qs - 1);
2120        return 0;
2121}
2122
2123static irqreturn_t be_intx(int irq, void *dev)
2124{
2125        struct be_eq_obj *eqo = dev;
2126        struct be_adapter *adapter = eqo->adapter;
2127        int num_evts = 0;
2128
2129        /* IRQ is not expected when NAPI is scheduled as the EQ
2130         * will not be armed.
2131         * But, this can happen on Lancer INTx where it takes
2132         * a while to de-assert INTx or in BE2 where occasionally
2133         * an interrupt may be raised even when EQ is unarmed.
2134         * If NAPI is already scheduled, then counting & notifying
2135         * events will orphan them.
2136         */
2137        if (napi_schedule_prep(&eqo->napi)) {
2138                num_evts = events_get(eqo);
2139                __napi_schedule(&eqo->napi);
2140                if (num_evts)
2141                        eqo->spurious_intr = 0;
2142        }
2143        be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2144
2145        /* Return IRQ_HANDLED only for the first spurious intr
2146         * after a valid intr to stop the kernel from branding
2147         * this irq as a bad one!
2148         */
2149        if (num_evts || eqo->spurious_intr++ == 0)
2150                return IRQ_HANDLED;
2151        else
2152                return IRQ_NONE;
2153}
2154
2155static irqreturn_t be_msix(int irq, void *dev)
2156{
2157        struct be_eq_obj *eqo = dev;
2158
2159        be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2160        napi_schedule(&eqo->napi);
2161        return IRQ_HANDLED;
2162}
2163
2164static inline bool do_gro(struct be_rx_compl_info *rxcp)
2165{
2166        return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2167}
2168
2169static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2170                        int budget)
2171{
2172        struct be_adapter *adapter = rxo->adapter;
2173        struct be_queue_info *rx_cq = &rxo->cq;
2174        struct be_rx_compl_info *rxcp;
2175        u32 work_done;
2176
2177        for (work_done = 0; work_done < budget; work_done++) {
2178                rxcp = be_rx_compl_get(rxo);
2179                if (!rxcp)
2180                        break;
2181
2182                /* Is it a flush compl that has no data? */
2183                if (unlikely(rxcp->num_rcvd == 0))
2184                        goto loop_continue;
2185
2186                /* Discard compl with partial DMA Lancer B0 */
2187                if (unlikely(!rxcp->pkt_size)) {
2188                        be_rx_compl_discard(rxo, rxcp);
2189                        goto loop_continue;
2190                }
2191
2192                /* On BE drop pkts that arrive due to imperfect filtering in
2193                 * promiscuous mode on some SKUs
2194                 */
2195                if (unlikely(rxcp->port != adapter->port_num &&
2196                                !lancer_chip(adapter))) {
2197                        be_rx_compl_discard(rxo, rxcp);
2198                        goto loop_continue;
2199                }
2200
2201                if (do_gro(rxcp))
2202                        be_rx_compl_process_gro(rxo, napi, rxcp);
2203                else
2204                        be_rx_compl_process(rxo, rxcp);
2205loop_continue:
2206                be_rx_stats_update(rxo, rxcp);
2207        }
2208
2209        if (work_done) {
2210                be_cq_notify(adapter, rx_cq->id, true, work_done);
2211
2212                if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2213                        be_post_rx_frags(rxo, GFP_ATOMIC);
2214        }
2215
2216        return work_done;
2217}
2218
2219static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2220                          int budget, int idx)
2221{
2222        struct be_eth_tx_compl *txcp;
2223        int num_wrbs = 0, work_done;
2224
2225        for (work_done = 0; work_done < budget; work_done++) {
2226                txcp = be_tx_compl_get(&txo->cq);
2227                if (!txcp)
2228                        break;
2229                num_wrbs += be_tx_compl_process(adapter, txo,
2230                                AMAP_GET_BITS(struct amap_eth_tx_compl,
2231                                        wrb_index, txcp));
2232        }
2233
2234        if (work_done) {
2235                be_cq_notify(adapter, txo->cq.id, true, work_done);
2236                atomic_sub(num_wrbs, &txo->q.used);
2237
2238                /* As Tx wrbs have been freed up, wake up netdev queue
2239                 * if it was stopped due to lack of tx wrbs.  */
2240                if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2241                        atomic_read(&txo->q.used) < txo->q.len / 2) {
2242                        netif_wake_subqueue(adapter->netdev, idx);
2243                }
2244
2245                u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2246                tx_stats(txo)->tx_compl += work_done;
2247                u64_stats_update_end(&tx_stats(txo)->sync_compl);
2248        }
2249        return (work_done < budget); /* Done */
2250}
2251
2252int be_poll(struct napi_struct *napi, int budget)
2253{
2254        struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2255        struct be_adapter *adapter = eqo->adapter;
2256        int max_work = 0, work, i, num_evts;
2257        bool tx_done;
2258
2259        num_evts = events_get(eqo);
2260
2261        /* Process all TXQs serviced by this EQ (TXQ i maps to EQ i % num_evt_qs) */
2262        for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2263                tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2264                                        eqo->tx_budget, i);
2265                if (!tx_done)
2266                        max_work = budget;
2267        }
2268
2269        /* This loop iterates twice for EQ0, on which completions of the
2270         * last RXQ (the default one) are also processed.
2271         * For other EQs the loop iterates only once.
2272         */
2273        for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2274                work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2275                max_work = max(work, max_work);
2276        }
2277
2278        if (is_mcc_eqo(eqo))
2279                be_process_mcc(adapter);
2280
2281        if (max_work < budget) {
2282                napi_complete(napi);
2283                be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2284        } else {
2285                /* As we'll continue in polling mode, count and clear events */
2286                be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2287        }
2288        return max_work;
2289}
2290
2291void be_detect_error(struct be_adapter *adapter)
2292{
2293        u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2294        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2295        u32 i;
2296
2297        if (be_hw_error(adapter))
2298                return;
2299
2300        if (lancer_chip(adapter)) {
2301                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2302                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2303                        sliport_err1 = ioread32(adapter->db +
2304                                        SLIPORT_ERROR1_OFFSET);
2305                        sliport_err2 = ioread32(adapter->db +
2306                                        SLIPORT_ERROR2_OFFSET);
2307                }
2308        } else {
2309                pci_read_config_dword(adapter->pdev,
2310                                PCICFG_UE_STATUS_LOW, &ue_lo);
2311                pci_read_config_dword(adapter->pdev,
2312                                PCICFG_UE_STATUS_HIGH, &ue_hi);
2313                pci_read_config_dword(adapter->pdev,
2314                                PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2315                pci_read_config_dword(adapter->pdev,
2316                                PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2317
2318                ue_lo = (ue_lo & ~ue_lo_mask);
2319                ue_hi = (ue_hi & ~ue_hi_mask);
2320        }
2321
2322        /* On certain platforms BE hardware can indicate spurious UEs.
2323         * hw_error is therefore not set on UE detection; in case of a
2324         * real UE, the h/w is allowed to stop working completely.
2325         */
2326        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2327                adapter->hw_error = true;
2328                dev_err(&adapter->pdev->dev,
2329                        "Error detected in the card\n");
2333                dev_err(&adapter->pdev->dev,
2334                        "ERR: sliport status 0x%x\n", sliport_status);
2335                dev_err(&adapter->pdev->dev,
2336                        "ERR: sliport error1 0x%x\n", sliport_err1);
2337                dev_err(&adapter->pdev->dev,
2338                        "ERR: sliport error2 0x%x\n", sliport_err2);
2339        }
2340
2341        if (ue_lo) {
2342                for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2343                        if (ue_lo & 1)
2344                                dev_err(&adapter->pdev->dev,
2345                                "UE: %s bit set\n", ue_status_low_desc[i]);
2346                }
2347        }
2348
2349        if (ue_hi) {
2350                for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2351                        if (ue_hi & 1)
2352                                dev_err(&adapter->pdev->dev,
2353                                "UE: %s bit set\n", ue_status_hi_desc[i]);
2354                }
2355        }
2357}
2358
2359static void be_msix_disable(struct be_adapter *adapter)
2360{
2361        if (msix_enabled(adapter)) {
2362                pci_disable_msix(adapter->pdev);
2363                adapter->num_msix_vec = 0;
2364                adapter->num_msix_roce_vec = 0;
2365        }
2366}
2367
2368static int be_msix_enable(struct be_adapter *adapter)
2369{
2370        int i, status, num_vec;
2371        struct device *dev = &adapter->pdev->dev;
2372
2373        /* If RoCE is supported, program the max number of NIC vectors that
2374         * may be configured via set-channels, along with vectors needed for
2375         * RoCE. Else, just program the number we'll use initially.
2376         */
2377        if (be_roce_supported(adapter))
2378                num_vec = min_t(int, 2 * be_max_eqs(adapter),
2379                                2 * num_online_cpus());
2380        else
2381                num_vec = adapter->cfg_num_qs;
2382
2383        for (i = 0; i < num_vec; i++)
2384                adapter->msix_entries[i].entry = i;
2385
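            /* pci_enable_msix() returns 0 on success, a negative errno on
             * failure, or a positive count of vectors that could have been
             * allocated; in that last case, retry with the smaller count.
             */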
2386        status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2387        if (status == 0) {
2388                goto done;
2389        } else if (status >= MIN_MSIX_VECTORS) {
2390                num_vec = status;
2391                status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2392                                         num_vec);
2393                if (!status)
2394                        goto done;
2395        }
2396
2397        dev_warn(dev, "MSIx enable failed\n");
2398
2399        /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2400        if (!be_physfn(adapter))
2401                return status;
2402        return 0;
2403done:
2404        if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2405                adapter->num_msix_roce_vec = num_vec / 2;
2406                dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2407                         adapter->num_msix_roce_vec);
2408        }
2409
2410        adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2411
2412        dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2413                 adapter->num_msix_vec);
2414        return 0;
2415}
2416
2417static inline int be_msix_vec_get(struct be_adapter *adapter,
2418                                struct be_eq_obj *eqo)
2419{
2420        return adapter->msix_entries[eqo->msix_idx].vector;
2421}
2422
2423static int be_msix_register(struct be_adapter *adapter)
2424{
2425        struct net_device *netdev = adapter->netdev;
2426        struct be_eq_obj *eqo;
2427        int status, i, vec;
2428
2429        for_all_evt_queues(adapter, eqo, i) {
2430                sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2431                vec = be_msix_vec_get(adapter, eqo);
2432                status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2433                if (status)
2434                        goto err_msix;
2435        }
2436
2437        return 0;
2438err_msix:
2439        for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2440                free_irq(be_msix_vec_get(adapter, eqo), eqo);
2441        dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2442                status);
2443        be_msix_disable(adapter);
2444        return status;
2445}
2446
2447static int be_irq_register(struct be_adapter *adapter)
2448{
2449        struct net_device *netdev = adapter->netdev;
2450        int status;
2451
2452        if (msix_enabled(adapter)) {
2453                status = be_msix_register(adapter);
2454                if (status == 0)
2455                        goto done;
2456                /* INTx is not supported for VF */
2457                if (!be_physfn(adapter))
2458                        return status;
2459        }
2460
2461        /* INTx: only the first EQ is used */
2462        netdev->irq = adapter->pdev->irq;
2463        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2464                             &adapter->eq_obj[0]);
2465        if (status) {
2466                dev_err(&adapter->pdev->dev,
2467                        "INTx request IRQ failed - err %d\n", status);
2468                return status;
2469        }
2470done:
2471        adapter->isr_registered = true;
2472        return 0;
2473}
2474
2475static void be_irq_unregister(struct be_adapter *adapter)
2476{
2477        struct net_device *netdev = adapter->netdev;
2478        struct be_eq_obj *eqo;
2479        int i;
2480
2481        if (!adapter->isr_registered)
2482                return;
2483
2484        /* INTx */
2485        if (!msix_enabled(adapter)) {
2486                free_irq(netdev->irq, &adapter->eq_obj[0]);
2487                goto done;
2488        }
2489
2490        /* MSIx */
2491        for_all_evt_queues(adapter, eqo, i)
2492                free_irq(be_msix_vec_get(adapter, eqo), eqo);
2493
2494done:
2495        adapter->isr_registered = false;
2496}
2497
2498static void be_rx_qs_destroy(struct be_adapter *adapter)
2499{
2500        struct be_queue_info *q;
2501        struct be_rx_obj *rxo;
2502        int i;
2503
2504        for_all_rx_queues(adapter, rxo, i) {
2505                q = &rxo->q;
2506                if (q->created) {
2507                        be_cmd_rxq_destroy(adapter, q);
2508                        be_rx_cq_clean(rxo);
2509                }
2510                be_queue_free(adapter, q);
2511        }
2512}
2513
2514static int be_close(struct net_device *netdev)
2515{
2516        struct be_adapter *adapter = netdev_priv(netdev);
2517        struct be_eq_obj *eqo;
2518        int i;
2519
2520        be_roce_dev_close(adapter);
2521
2522        if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2523                for_all_evt_queues(adapter, eqo, i)
2524                        napi_disable(&eqo->napi);
2525                adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2526        }
2527
2528        be_async_mcc_disable(adapter);
2529
2530        /* Wait for all pending tx completions to arrive so that
2531         * all tx skbs are freed.
2532         */
2533        netif_tx_disable(netdev);
2534        be_tx_compl_clean(adapter);
2535
2536        be_rx_qs_destroy(adapter);
2537
2538        for_all_evt_queues(adapter, eqo, i) {
2539                if (msix_enabled(adapter))
2540                        synchronize_irq(be_msix_vec_get(adapter, eqo));
2541                else
2542                        synchronize_irq(netdev->irq);
2543                be_eq_clean(eqo);
2544        }
2545
2546        be_irq_unregister(adapter);
2547
2548        return 0;
2549}
2550
2551static int be_rx_qs_create(struct be_adapter *adapter)
2552{
2553        struct be_rx_obj *rxo;
2554        int rc, i, j;
2555        u8 rsstable[128];
2556
2557        for_all_rx_queues(adapter, rxo, i) {
2558                rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2559                                    sizeof(struct be_eth_rx_d));
2560                if (rc)
2561                        return rc;
2562        }
2563
2564        /* The FW would like the default RXQ to be created first */
2565        rxo = default_rxo(adapter);
2566        rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2567                               adapter->if_handle, false, &rxo->rss_id);
2568        if (rc)
2569                return rc;
2570
2571        for_all_rss_queues(adapter, rxo, i) {
2572                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2573                                       rx_frag_size, adapter->if_handle,
2574                                       true, &rxo->rss_id);
2575                if (rc)
2576                        return rc;
2577        }
2578
2579        if (be_multi_rxq(adapter)) {
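                    /* Fill the 128-entry RSS indirection table by cycling
                     * through the RSS rings' rss_ids round-robin */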
2580                for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2581                        for_all_rss_queues(adapter, rxo, i) {
2582                                if ((j + i) >= 128)
2583                                        break;
2584                                rsstable[j + i] = rxo->rss_id;
2585                        }
2586                }
2587                adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2588                                        RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2589
2590                if (!BEx_chip(adapter))
2591                        adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2592                                                RSS_ENABLE_UDP_IPV6;
2593
2594                rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2595                                       128);
2596                if (rc) {
2597                        adapter->rss_flags = 0;
2598                        return rc;
2599                }
2600        }
2601
2602        /* First time posting */
2603        for_all_rx_queues(adapter, rxo, i)
2604                be_post_rx_frags(rxo, GFP_KERNEL);
2605        return 0;
2606}
2607
2608static int be_open(struct net_device *netdev)
2609{
2610        struct be_adapter *adapter = netdev_priv(netdev);
2611        struct be_eq_obj *eqo;
2612        struct be_rx_obj *rxo;
2613        struct be_tx_obj *txo;
2614        u8 link_status;
2615        int status, i;
2616
2617        status = be_rx_qs_create(adapter);
2618        if (status)
2619                goto err;
2620
2621        status = be_irq_register(adapter);
2622        if (status)
2623                goto err;
2624
2625        for_all_rx_queues(adapter, rxo, i)
2626                be_cq_notify(adapter, rxo->cq.id, true, 0);
2627
2628        for_all_tx_queues(adapter, txo, i)
2629                be_cq_notify(adapter, txo->cq.id, true, 0);
2630
2631        be_async_mcc_enable(adapter);
2632
2633        for_all_evt_queues(adapter, eqo, i) {
2634                napi_enable(&eqo->napi);
2635                be_eq_notify(adapter, eqo->q.id, true, false, 0);
2636        }
2637        adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2638
2639        status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2640        if (!status)
2641                be_link_status_update(adapter, link_status);
2642
2643        netif_tx_start_all_queues(netdev);
2644        be_roce_dev_open(adapter);
2645        return 0;
2646err:
2647        be_close(adapter->netdev);
2648        return -EIO;
2649}
2650
2651static int be_setup_wol(struct be_adapter *adapter, bool enable)
2652{
2653        struct be_dma_mem cmd;
2654        int status = 0;
2655        u8 mac[ETH_ALEN];
2656
2657        memset(mac, 0, ETH_ALEN);
2658
2659        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2660        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2661                                     GFP_KERNEL);
2662        if (cmd.va == NULL)
2663                return -1;
2664
2665        if (enable) {
2666                status = pci_write_config_dword(adapter->pdev,
2667                        PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2668                if (status) {
2669                        dev_err(&adapter->pdev->dev,
2670                                "Could not enable Wake-on-lan\n");
2671                        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2672                                          cmd.dma);
2673                        return status;
2674                }
2675                status = be_cmd_enable_magic_wol(adapter,
2676                                adapter->netdev->dev_addr, &cmd);
2677                pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2678                pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2679        } else {
2680                status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2681                pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2682                pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2683        }
2684
2685        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2686        return status;
2687}
2688
2689/*
2690 * Generate a seed MAC address from the PF MAC Address using jhash.
2691 * MAC addresses for VFs are assigned incrementally starting from the seed.
2692 * These addresses are programmed in the ASIC by the PF and the VF driver
2693 * queries for the MAC address during its probe.
2694 */
2695static int be_vf_eth_addr_config(struct be_adapter *adapter)
2696{
2697        u32 vf;
2698        int status = 0;
2699        u8 mac[ETH_ALEN];
2700        struct be_vf_cfg *vf_cfg;
2701
2702        be_vf_eth_addr_generate(adapter, mac);
2703
2704        for_all_vfs(adapter, vf_cfg, vf) {
2705                if (BEx_chip(adapter))
2706                        status = be_cmd_pmac_add(adapter, mac,
2707                                                 vf_cfg->if_handle,
2708                                                 &vf_cfg->pmac_id, vf + 1);
2709                else
2710                        status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2711                                                vf + 1);
2712
2713                if (status)
2714                        dev_err(&adapter->pdev->dev,
2715                        "Mac address assignment failed for VF %d\n", vf);
2716                else
2717                        memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2718
2719                mac[5] += 1;
2720        }
2721        return status;
2722}
2723
2724static int be_vfs_mac_query(struct be_adapter *adapter)
2725{
2726        int status, vf;
2727        u8 mac[ETH_ALEN];
2728        struct be_vf_cfg *vf_cfg;
2729        bool active = false;
2730
2731        for_all_vfs(adapter, vf_cfg, vf) {
2732                be_cmd_get_mac_from_list(adapter, mac, &active,
2733                                         &vf_cfg->pmac_id, 0);
2734
2735                status = be_cmd_mac_addr_query(adapter, mac, false,
2736                                               vf_cfg->if_handle, 0);
2737                if (status)
2738                        return status;
2739                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2740        }
2741        return 0;
2742}
2743
2744static void be_vf_clear(struct be_adapter *adapter)
2745{
2746        struct be_vf_cfg *vf_cfg;
2747        u32 vf;
2748
2749        if (pci_vfs_assigned(adapter->pdev)) {
2750                dev_warn(&adapter->pdev->dev,
2751                         "VFs are assigned to VMs: not disabling VFs\n");
2752                goto done;
2753        }
2754
2755        pci_disable_sriov(adapter->pdev);
2756
2757        for_all_vfs(adapter, vf_cfg, vf) {
2758                if (BEx_chip(adapter))
2759                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2760                                        vf_cfg->pmac_id, vf + 1);
2761                else
2762                        be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2763                                       vf + 1);
2764
2765                be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2766        }
2767done:
2768        kfree(adapter->vf_cfg);
2769        adapter->num_vfs = 0;
2770}
2771
2772static void be_clear_queues(struct be_adapter *adapter)
2773{
2774        be_mcc_queues_destroy(adapter);
2775        be_rx_cqs_destroy(adapter);
2776        be_tx_queues_destroy(adapter);
2777        be_evt_queues_destroy(adapter);
2778}
2779
2780static void be_cancel_worker(struct be_adapter *adapter)
2781{
2782        if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2783                cancel_delayed_work_sync(&adapter->work);
2784                adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2785        }
2786}
2787
2788static int be_clear(struct be_adapter *adapter)
2789{
2790        int i;
2791
2792        be_cancel_worker(adapter);
2793
2794        if (sriov_enabled(adapter))
2795                be_vf_clear(adapter);
2796
2797        /* delete the primary mac along with the uc-mac list */
2798        for (i = 0; i < (adapter->uc_macs + 1); i++)
2799                be_cmd_pmac_del(adapter, adapter->if_handle,
2800                                adapter->pmac_id[i], 0);
2801        adapter->uc_macs = 0;
2802
2803        be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2804
2805        be_clear_queues(adapter);
2806
2807        kfree(adapter->pmac_id);
2808        adapter->pmac_id = NULL;
2809
2810        be_msix_disable(adapter);
2811        return 0;
2812}
2813
2814static int be_vfs_if_create(struct be_adapter *adapter)
2815{
2816        struct be_resources res = {0};
2817        struct be_vf_cfg *vf_cfg;
2818        u32 cap_flags, en_flags, vf;
2819        int status = 0;
2820
2821        cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2822                    BE_IF_FLAGS_MULTICAST;
2823
2824        for_all_vfs(adapter, vf_cfg, vf) {
2825                if (!BE3_chip(adapter)) {
2826                        status = be_cmd_get_profile_config(adapter, &res,
2827                                                           vf + 1);
2828                        if (!status)
2829                                cap_flags = res.if_cap_flags;
2830                }
2831
2832                /* If a FW profile exists, then cap_flags are updated */
2833                en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2834                           BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2835                status = be_cmd_if_create(adapter, cap_flags, en_flags,
2836                                          &vf_cfg->if_handle, vf + 1);
2837                if (status)
2838                        goto err;
2839        }
2840err:
2841        return status;
2842}
2843
2844static int be_vf_setup_init(struct be_adapter *adapter)
2845{
2846        struct be_vf_cfg *vf_cfg;
2847        int vf;
2848
2849        adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2850                                  GFP_KERNEL);
2851        if (!adapter->vf_cfg)
2852                return -ENOMEM;
2853
2854        for_all_vfs(adapter, vf_cfg, vf) {
2855                vf_cfg->if_handle = -1;
2856                vf_cfg->pmac_id = -1;
2857        }
2858        return 0;
2859}
2860
2861static int be_vf_setup(struct be_adapter *adapter)
2862{
2863        struct be_vf_cfg *vf_cfg;
2864        u16 def_vlan, lnk_speed;
2865        int status, old_vfs, vf;
2866        struct device *dev = &adapter->pdev->dev;
2867        u32 privileges;
2868
2869        old_vfs = pci_num_vf(adapter->pdev);
2870        if (old_vfs) {
2871                dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2872                if (old_vfs != num_vfs)
2873                        dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2874                adapter->num_vfs = old_vfs;
2875        } else {
2876                if (num_vfs > be_max_vfs(adapter))
2877                        dev_info(dev, "Device supports %d VFs and not %d\n",
2878                                 be_max_vfs(adapter), num_vfs);
2879                adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
2880                if (!adapter->num_vfs)
2881                        return 0;
2882        }
2883
2884        status = be_vf_setup_init(adapter);
2885        if (status)
2886                goto err;
2887
2888        if (old_vfs) {
2889                for_all_vfs(adapter, vf_cfg, vf) {
2890                        status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2891                        if (status)
2892                                goto err;
2893                }
2894        } else {
2895                status = be_vfs_if_create(adapter);
2896                if (status)
2897                        goto err;
2898        }
2899
2900        if (old_vfs) {
2901                status = be_vfs_mac_query(adapter);
2902                if (status)
2903                        goto err;
2904        } else {
2905                status = be_vf_eth_addr_config(adapter);
2906                if (status)
2907                        goto err;
2908        }
2909
2910        for_all_vfs(adapter, vf_cfg, vf) {
2911                /* Allow VFs to program MAC/VLAN filters */
2912                status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2913                if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2914                        status = be_cmd_set_fn_privileges(adapter,
2915                                                          privileges |
2916                                                          BE_PRIV_FILTMGMT,
2917                                                          vf + 1);
2918                        if (!status)
2919                                dev_info(dev, "VF%d has FILTMGMT privilege\n",
2920                                         vf);
2921                }
2922
2923                /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2924                 * Allow the full available bandwidth.
2925                 */
2926                if (BE3_chip(adapter) && !old_vfs)
2927                        be_cmd_set_qos(adapter, 1000, vf + 1);
2928
2929                status = be_cmd_link_status_query(adapter, &lnk_speed,
2930                                                  NULL, vf + 1);
2931                if (!status)
2932                        vf_cfg->tx_rate = lnk_speed;
2933
2934                status = be_cmd_get_hsw_config(adapter, &def_vlan,
2935                                               vf + 1, vf_cfg->if_handle, NULL);
2936                if (status)
2937                        goto err;
2938                vf_cfg->def_vid = def_vlan;
2939
2940                be_cmd_enable_vf(adapter, vf + 1);
2941        }
2942
2943        if (!old_vfs) {
2944                status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2945                if (status) {
2946                        dev_err(dev, "SRIOV enable failed\n");
2947                        adapter->num_vfs = 0;
2948                        goto err;
2949                }
2950        }
2951        return 0;
2952err:
2953        dev_err(dev, "VF setup failed\n");
2954        be_vf_clear(adapter);
2955        return status;
2956}
2957
2958/* On BE2/BE3, FW does not suggest the supported limits */
2959static void BEx_get_resources(struct be_adapter *adapter,
2960                              struct be_resources *res)
2961{
2962        struct pci_dev *pdev = adapter->pdev;
2963        bool use_sriov = false;
2964
2965        if (BE3_chip(adapter) && be_physfn(adapter)) {
2966                int max_vfs;
2967
2968                max_vfs = pci_sriov_get_totalvfs(pdev);
2969                res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
2970                use_sriov = res->max_vfs && num_vfs;
2971        }
2972
2973        if (be_physfn(adapter))
2974                res->max_uc_mac = BE_UC_PMAC_COUNT;
2975        else
2976                res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
2977
2978        if (adapter->function_mode & FLEX10_MODE)
2979                res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
2980        else if (adapter->function_mode & UMC_ENABLED)
2981                res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
2982        else
2983                res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2984        res->max_mcast_mac = BE_MAX_MC;
2985
2986        if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
2987            !be_physfn(adapter))
2988                res->max_tx_qs = 1;
2989        else
2990                res->max_tx_qs = BE3_MAX_TX_QS;
2991
2992        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2993            !use_sriov && be_physfn(adapter))
2994                res->max_rss_qs = (adapter->be3_native) ?
2995                                           BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2996        res->max_rx_qs = res->max_rss_qs + 1;
2997
2998        res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
2999
3000        res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3001        if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3002                res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3003}
3004
3005static void be_setup_init(struct be_adapter *adapter)
3006{
3007        adapter->vlan_prio_bmap = 0xff;
3008        adapter->phy.link_speed = -1;
3009        adapter->if_handle = -1;
3010        adapter->be3_native = false;
3011        adapter->promiscuous = false;
3012        if (be_physfn(adapter))
3013                adapter->cmd_privileges = MAX_PRIVILEGES;
3014        else
3015                adapter->cmd_privileges = MIN_PRIVILEGES;
3016}
3017
3018static int be_get_resources(struct be_adapter *adapter)
3019{
3020        struct device *dev = &adapter->pdev->dev;
3021        struct be_resources res = {0};
3022        int status;
3023
3024        if (BEx_chip(adapter)) {
3025                BEx_get_resources(adapter, &res);
3026                adapter->res = res;
3027        }
3028
3029        /* For BE3, only check if FW suggests a different max-txqs value */
3030        if (BE3_chip(adapter)) {
3031                status = be_cmd_get_profile_config(adapter, &res, 0);
3032                if (!status && res.max_tx_qs)
3033                        adapter->res.max_tx_qs =
3034                                min(adapter->res.max_tx_qs, res.max_tx_qs);
3035        }
3036
3037        /* For Lancer, Skyhawk etc., read per-function resource limits from FW.
3038         * GET_FUNC_CONFIG returns per-function guaranteed limits.
3039         * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
3040         */
3041        if (!BEx_chip(adapter)) {
3042                status = be_cmd_get_func_config(adapter, &res);
3043                if (status)
3044                        return status;
3045
3046                /* If RoCE may be enabled, stash away half the EQs for RoCE */
3047                if (be_roce_supported(adapter))
3048                        res.max_evt_qs /= 2;
3049                adapter->res = res;
3050
3051                if (be_physfn(adapter)) {
3052                        status = be_cmd_get_profile_config(adapter, &res, 0);
3053                        if (status)
3054                                return status;
3055                        adapter->res.max_vfs = res.max_vfs;
3056                }
3057
3058                dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3059                         be_max_txqs(adapter), be_max_rxqs(adapter),
3060                         be_max_rss(adapter), be_max_eqs(adapter),
3061                         be_max_vfs(adapter));
3062                dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3063                         be_max_uc(adapter), be_max_mc(adapter),
3064                         be_max_vlans(adapter));
3065        }
3066
3067        return 0;
3068}
3069
3070/* Routine to query per function resource limits */
3071static int be_get_config(struct be_adapter *adapter)
3072{
3073        int status;
3074
3075        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3076                                     &adapter->function_mode,
3077                                     &adapter->function_caps,
3078                                     &adapter->asic_rev);
3079        if (status)
3080                return status;
3081
3082        status = be_get_resources(adapter);
3083        if (status)
3084                return status;
3085
3086        /* primary mac needs 1 pmac entry */
3087        adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3088                                   GFP_KERNEL);
3089        if (!adapter->pmac_id)
3090                return -ENOMEM;
3091
3092        /* Sanitize cfg_num_qs based on HW and platform limits */
3093        adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3094
3095        return 0;
3096}
3097
3098static int be_mac_setup(struct be_adapter *adapter)
3099{
3100        u8 mac[ETH_ALEN];
3101        int status;
3102
3103        if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3104                status = be_cmd_get_perm_mac(adapter, mac);
3105                if (status)
3106                        return status;
3107
3108                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3109                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3110        } else {
3111                /* Maybe the HW was reset; dev_addr must be re-programmed */
3112                memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3113        }
3114
3115        /* On BE3 VFs this cmd may fail due to lack of privilege.
3116         * Ignore the failure as in this case pmac_id is fetched
3117         * in the IFACE_CREATE cmd.
3118         */
3119        be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3120                        &adapter->pmac_id[0], 0);
3121        return 0;
3122}
3123
3124static void be_schedule_worker(struct be_adapter *adapter)
3125{
3126        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3127        adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3128}
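    /* be_schedule_worker() and be_cancel_worker() are intentionally
     * symmetric around BE_FLAGS_WORKER_SCHEDULED, so teardown paths can
     * call be_cancel_worker() without tracking whether the worker was
     * ever started. A minimal usage sketch (illustrative only):
     *
     *	be_schedule_worker(adapter);	/. sets BE_FLAGS_WORKER_SCHEDULED ./
     *	...
     *	be_cancel_worker(adapter);	/. no-op if the flag is not set   ./
     */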
3129
3130static int be_setup_queues(struct be_adapter *adapter)
3131{
3132        struct net_device *netdev = adapter->netdev;
3133        int status;
3134
3135        status = be_evt_queues_create(adapter);
3136        if (status)
3137                goto err;
3138
3139        status = be_tx_qs_create(adapter);
3140        if (status)
3141                goto err;
3142
3143        status = be_rx_cqs_create(adapter);
3144        if (status)
3145                goto err;
3146
3147        status = be_mcc_queues_create(adapter);
3148        if (status)
3149                goto err;
3150
3151        status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3152        if (status)
3153                goto err;
3154
3155        status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3156        if (status)
3157                goto err;
3158
3159        return 0;
3160err:
3161        dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3162        return status;
3163}
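    /* Note the creation order above: EQs first, then TX queues, RX CQs and
     * finally the MCC queues. be_clear_queues() earlier in this file tears
     * them down in exactly the reverse order, since the completion queues
     * hang off the event queues:
     *
     *	be_mcc_queues_destroy(adapter);
     *	be_rx_cqs_destroy(adapter);
     *	be_tx_queues_destroy(adapter);
     *	be_evt_queues_destroy(adapter);
     */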
3164
3165int be_update_queues(struct be_adapter *adapter)
3166{
3167        struct net_device *netdev = adapter->netdev;
3168        int status;
3169
3170        if (netif_running(netdev))
3171                be_close(netdev);
3172
3173        be_cancel_worker(adapter);
3174
3175        /* If any vectors have been shared with RoCE we cannot re-program
3176         * the MSIx table.
3177         */
3178        if (!adapter->num_msix_roce_vec)
3179                be_msix_disable(adapter);
3180
3181        be_clear_queues(adapter);
3182
3183        if (!msix_enabled(adapter)) {
3184                status = be_msix_enable(adapter);
3185                if (status)
3186                        return status;
3187        }
3188
3189        status = be_setup_queues(adapter);
3190        if (status)
3191                return status;
3192
3193        be_schedule_worker(adapter);
3194
3195        if (netif_running(netdev))
3196                status = be_open(netdev);
3197
3198        return status;
3199}
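    /* be_update_queues() is the close -> re-provision -> reopen path for
     * runtime queue-count changes. A hypothetical caller (e.g. an ethtool
     * set_channels handler; the names below are illustrative, not the
     * actual be2net ethtool code) would look like:
     *
     *	adapter->cfg_num_qs = new_channel_count;
     *	status = be_update_queues(adapter);
     *
     * Ethtool ops run under rtnl, which is why be_update_queues() does not
     * wrap be_setup_queues() in rtnl_lock() the way be_setup() does.
     */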
3200
3201static int be_setup(struct be_adapter *adapter)
3202{
3203        struct device *dev = &adapter->pdev->dev;
3204        u32 tx_fc, rx_fc, en_flags;
3205        int status;
3206
3207        be_setup_init(adapter);
3208
3209        if (!lancer_chip(adapter))
3210                be_cmd_req_native_mode(adapter);
3211
3212        status = be_get_config(adapter);
3213        if (status)
3214                goto err;
3215
3216        status = be_msix_enable(adapter);
3217        if (status)
3218                goto err;
3219
3220        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3221                   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3222        if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3223                en_flags |= BE_IF_FLAGS_RSS;
3224        en_flags &= be_if_cap_flags(adapter);
3225        status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3226                                  &adapter->if_handle, 0);
3227        if (status)
3228                goto err;
3229
3230        /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3231        rtnl_lock();
3232        status = be_setup_queues(adapter);
3233        rtnl_unlock();
3234        if (status)
3235                goto err;
3236
3237        be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3238        /* In UMC mode, FW does not return the right privileges.
3239         * Override with privileges equivalent to the PF's.
3240         */
3241        if (be_is_mc(adapter))
3242                adapter->cmd_privileges = MAX_PRIVILEGES;
3243
3244        status = be_mac_setup(adapter);
3245        if (status)
3246                goto err;
3247
3248        be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3249
3250        if (adapter->vlans_added)
3251                be_vid_config(adapter);
3252
3253        be_set_rx_mode(adapter->netdev);
3254
3255        be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3256
3257        if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3258                be_cmd_set_flow_control(adapter, adapter->tx_fc,
3259                                        adapter->rx_fc);
3260
3261        if (be_physfn(adapter) && num_vfs) {
3262                if (be_max_vfs(adapter))
3263                        be_vf_setup(adapter);
3264                else
3265                        dev_warn(dev, "device doesn't support SRIOV\n");
3266        }
3267
3268        status = be_cmd_get_phy_info(adapter);
3269        if (!status && be_pause_supported(adapter))
3270                adapter->phy.fc_autoneg = 1;
3271
3272        be_schedule_worker(adapter);
3273        return 0;
3274err:
3275        be_clear(adapter);
3276        return status;
3277}
3278
3279#ifdef CONFIG_NET_POLL_CONTROLLER
3280static void be_netpoll(struct net_device *netdev)
3281{
3282        struct be_adapter *adapter = netdev_priv(netdev);
3283        struct be_eq_obj *eqo;
3284        int i;
3285
3286        for_all_evt_queues(adapter, eqo, i) {
3287                be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3288                napi_schedule(&eqo->napi);
3289        }
3292}
3293#endif
3294
3295#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3296static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3297
3298static bool be_flash_redboot(struct be_adapter *adapter,
3299                        const u8 *p, u32 img_start, int image_size,
3300                        int hdr_size)
3301{
3302        u32 crc_offset;
3303        u8 flashed_crc[4];
3304        int status;
3305
3306        crc_offset = hdr_size + img_start + image_size - 4;
3307
3308        p += crc_offset;
3309
3310        status = be_cmd_get_flash_crc(adapter, flashed_crc,
3311                        (image_size - 4));
3312        if (status) {
3313                dev_err(&adapter->pdev->dev,
3314                "could not get crc from flash, not flashing redboot\n");
3315                return false;
3316        }
3317
3318        /* update redboot only if the crc does not match */
3319        return memcmp(flashed_crc, p, 4) != 0;
3323}
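    /* Worked example of the CRC check above (hypothetical numbers): for a
     * redboot image at img_start 0x8000 with image_size 0x20000 and a
     * 32-byte header,
     *
     *	crc_offset = 32 + 0x8000 + 0x20000 - 4
     *
     * i.e. the last 4 bytes of the image within the UFI file. If those
     * bytes match the CRC already on flash, the section is skipped.
     */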
3324
3325static bool phy_flashing_required(struct be_adapter *adapter)
3326{
3327        return (adapter->phy.phy_type == TN_8022 &&
3328                adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3329}
3330
3331static bool is_comp_in_ufi(struct be_adapter *adapter,
3332                           struct flash_section_info *fsec, int type)
3333{
3334        int i = 0, img_type = 0;
3335        struct flash_section_info_g2 *fsec_g2 = NULL;
3336
3337        if (BE2_chip(adapter))
3338                fsec_g2 = (struct flash_section_info_g2 *)fsec;
3339
3340        for (i = 0; i < MAX_FLASH_COMP; i++) {
3341                if (fsec_g2)
3342                        img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3343                else
3344                        img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3345
3346                if (img_type == type)
3347                        return true;
3348        }
3349        return false;
3351}
3352
3353static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3354                                         int header_size,
3355                                         const struct firmware *fw)
3356{
3357        struct flash_section_info *fsec = NULL;
3358        const u8 *p = fw->data;
3359
3360        p += header_size;
3361        while (p < (fw->data + fw->size)) {
3362                fsec = (struct flash_section_info *)p;
3363                if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3364                        return fsec;
3365                p += 32;
3366        }
3367        return NULL;
3368}
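    /* get_fsec_info() scans the UFI in 32-byte steps after the file and
     * image headers, comparing each candidate against the full 32 bytes of
     * flash_cookie[] above (two NUL-padded 16-byte fields, not one
     * contiguous string) to locate the flash section directory.
     */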
3369
3370static int be_flash(struct be_adapter *adapter, const u8 *img,
3371                struct be_dma_mem *flash_cmd, int optype, int img_size)
3372{
3373        u32 total_bytes = 0, flash_op, num_bytes = 0;
3374        int status = 0;
3375        struct be_cmd_write_flashrom *req = flash_cmd->va;
3376
3377        total_bytes = img_size;
3378        while (total_bytes) {
3379                num_bytes = min_t(u32, 32*1024, total_bytes);
3380
3381                total_bytes -= num_bytes;
3382
3383                if (!total_bytes) {
3384                        if (optype == OPTYPE_PHY_FW)
3385                                flash_op = FLASHROM_OPER_PHY_FLASH;
3386                        else
3387                                flash_op = FLASHROM_OPER_FLASH;
3388                } else {
3389                        if (optype == OPTYPE_PHY_FW)
3390                                flash_op = FLASHROM_OPER_PHY_SAVE;
3391                        else
3392                                flash_op = FLASHROM_OPER_SAVE;
3393                }
3394
3395                memcpy(req->data_buf, img, num_bytes);
3396                img += num_bytes;
3397                status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3398                                                flash_op, num_bytes);
3399                if (status) {
3400                        if (status == ILLEGAL_IOCTL_REQ &&
3401                            optype == OPTYPE_PHY_FW)
3402                                break;
3403                        dev_err(&adapter->pdev->dev,
3404                                "cmd to write to flash rom failed.\n");
3405                        return status;
3406                }
3407        }
3408        return 0;
3409}
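    /* be_flash() streams the image in 32KB chunks: every chunk except the
     * last is written with a SAVE opcode, and the final chunk uses FLASH
     * (or PHY_FLASH) to commit. E.g. for a hypothetical 100KB image:
     *
     *	chunk 1: 32KB - FLASHROM_OPER_SAVE
     *	chunk 2: 32KB - FLASHROM_OPER_SAVE
     *	chunk 3: 32KB - FLASHROM_OPER_SAVE
     *	chunk 4:  4KB - FLASHROM_OPER_FLASH (commit)
     */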
3410
3411/* For BE2, BE3 and BE3-R */
3412static int be_flash_BEx(struct be_adapter *adapter,
3413                         const struct firmware *fw,
3414                         struct be_dma_mem *flash_cmd,
3415                         int num_of_images)
3417{
3418        int status = 0, i, filehdr_size = 0;
3419        int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3420        const u8 *p = fw->data;
3421        const struct flash_comp *pflashcomp;
3422        int num_comp, redboot;
3423        struct flash_section_info *fsec = NULL;
3424
3425        struct flash_comp gen3_flash_types[] = {
3426                { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3427                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3428                { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3429                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3430                { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3431                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3432                { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3433                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3434                { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3435                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3436                { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3437                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3438                { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3439                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3440                { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3441                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3442                { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3443                        FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3444                { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3445                        FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3446        };
3447
3448        struct flash_comp gen2_flash_types[] = {
3449                { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3450                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3451                { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3452                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3453                { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3454                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3455                { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3456                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3457                { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3458                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3459                { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3460                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3461                { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3462                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3463                { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3464                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3465        };
3466
3467        if (BE3_chip(adapter)) {
3468                pflashcomp = gen3_flash_types;
3469                filehdr_size = sizeof(struct flash_file_hdr_g3);
3470                num_comp = ARRAY_SIZE(gen3_flash_types);
3471        } else {
3472                pflashcomp = gen2_flash_types;
3473                filehdr_size = sizeof(struct flash_file_hdr_g2);
3474                num_comp = ARRAY_SIZE(gen2_flash_types);
3475        }
3476
3477        /* Get flash section info */
3478        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3479        if (!fsec) {
3480                dev_err(&adapter->pdev->dev,
3481                        "Invalid Cookie. UFI corrupted ?\n");
3482                return -1;
3483        }
3484        for (i = 0; i < num_comp; i++) {
3485                if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3486                        continue;
3487
3488                if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3489                    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3490                        continue;
3491
3492                if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3493                    !phy_flashing_required(adapter))
3494                        continue;
3495
3496                if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3497                        redboot = be_flash_redboot(adapter, fw->data,
3498                                pflashcomp[i].offset, pflashcomp[i].size,
3499                                filehdr_size + img_hdrs_size);
3500                        if (!redboot)
3501                                continue;
3502                }
3503
3504                p = fw->data;
3505                p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3506                if (p + pflashcomp[i].size > fw->data + fw->size)
3507                        return -1;
3508
3509                status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3510                                        pflashcomp[i].size);
3511                if (status) {
3512                        dev_err(&adapter->pdev->dev,
3513                                "Flashing section type %d failed.\n",
3514                                pflashcomp[i].img_type);
3515                        return status;
3516                }
3517        }
3518        return 0;
3519}
3520
3521static int be_flash_skyhawk(struct be_adapter *adapter,
3522                const struct firmware *fw,
3523                struct be_dma_mem *flash_cmd, int num_of_images)
3524{
3525        int status = 0, i, filehdr_size = 0;
3526        int img_offset, img_size, img_optype, redboot;
3527        int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3528        const u8 *p = fw->data;
3529        struct flash_section_info *fsec = NULL;
3530
3531        filehdr_size = sizeof(struct flash_file_hdr_g3);
3532        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3533        if (!fsec) {
3534                dev_err(&adapter->pdev->dev,
3535                        "Invalid Cookie. UFI corrupted ?\n");
3536                return -1;
3537        }
3538
3539        for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3540                img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3541                img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3542
3543                switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3544                case IMAGE_FIRMWARE_iSCSI:
3545                        img_optype = OPTYPE_ISCSI_ACTIVE;
3546                        break;
3547                case IMAGE_BOOT_CODE:
3548                        img_optype = OPTYPE_REDBOOT;
3549                        break;
3550                case IMAGE_OPTION_ROM_ISCSI:
3551                        img_optype = OPTYPE_BIOS;
3552                        break;
3553                case IMAGE_OPTION_ROM_PXE:
3554                        img_optype = OPTYPE_PXE_BIOS;
3555                        break;
3556                case IMAGE_OPTION_ROM_FCoE:
3557                        img_optype = OPTYPE_FCOE_BIOS;
3558                        break;
3559                case IMAGE_FIRMWARE_BACKUP_iSCSI:
3560                        img_optype = OPTYPE_ISCSI_BACKUP;
3561                        break;
3562                case IMAGE_NCSI:
3563                        img_optype = OPTYPE_NCSI_FW;
3564                        break;
3565                default:
3566                        continue;
3567                }
3568
3569                if (img_optype == OPTYPE_REDBOOT) {
3570                        redboot = be_flash_redboot(adapter, fw->data,
3571                                        img_offset, img_size,
3572                                        filehdr_size + img_hdrs_size);
3573                        if (!redboot)
3574                                continue;
3575                }
3576
3577                p = fw->data;
3578                p += filehdr_size + img_offset + img_hdrs_size;
3579                if (p + img_size > fw->data + fw->size)
3580                        return -1;
3581
3582                status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3583                if (status) {
3584                        dev_err(&adapter->pdev->dev,
3585                                "Flashing section type %d failed.\n",
3586                                le32_to_cpu(fsec->fsec_entry[i].type));
3587                        return status;
3588                }
3589        }
3590        return 0;
3591}
3592
3593static int lancer_fw_download(struct be_adapter *adapter,
3594                                const struct firmware *fw)
3595{
3596#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3597#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3598        struct be_dma_mem flash_cmd;
3599        const u8 *data_ptr = NULL;
3600        u8 *dest_image_ptr = NULL;
3601        size_t image_size = 0;
3602        u32 chunk_size = 0;
3603        u32 data_written = 0;
3604        u32 offset = 0;
3605        int status = 0;
3606        u8 add_status = 0;
3607        u8 change_status;
3608
3609        if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3610                dev_err(&adapter->pdev->dev,
3611                        "FW Image not properly aligned. "
3612                        "Length must be 4 byte aligned.\n");
3613                status = -EINVAL;
3614                goto lancer_fw_exit;
3615        }
3616
3617        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3618                                + LANCER_FW_DOWNLOAD_CHUNK;
3619        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3620                                          &flash_cmd.dma, GFP_KERNEL);
3621        if (!flash_cmd.va) {
3622                status = -ENOMEM;
3623                goto lancer_fw_exit;
3624        }
3625
3626        dest_image_ptr = flash_cmd.va +
3627                                sizeof(struct lancer_cmd_req_write_object);
3628        image_size = fw->size;
3629        data_ptr = fw->data;
3630
3631        while (image_size) {
3632                chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3633
3634                /* Copy the image chunk content. */
3635                memcpy(dest_image_ptr, data_ptr, chunk_size);
3636
3637                status = lancer_cmd_write_object(adapter, &flash_cmd,
3638                                                 chunk_size, offset,
3639                                                 LANCER_FW_DOWNLOAD_LOCATION,
3640                                                 &data_written, &change_status,
3641                                                 &add_status);
3642                if (status)
3643                        break;
3644
3645                offset += data_written;
3646                data_ptr += data_written;
3647                image_size -= data_written;
3648        }
3649
3650        if (!status) {
3651                /* Commit the FW written */
3652                status = lancer_cmd_write_object(adapter, &flash_cmd,
3653                                                 0, offset,
3654                                                 LANCER_FW_DOWNLOAD_LOCATION,
3655                                                 &data_written, &change_status,
3656                                                 &add_status);
3657        }
3658
3659        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3660                                flash_cmd.dma);
3661        if (status) {
3662                dev_err(&adapter->pdev->dev,
3663                        "Firmware load error. "
3664                        "Status code: 0x%x Additional Status: 0x%x\n",
3665                        status, add_status);
3666                goto lancer_fw_exit;
3667        }
3668
3669        if (change_status == LANCER_FW_RESET_NEEDED) {
3670                status = lancer_physdev_ctrl(adapter,
3671                                             PHYSDEV_CONTROL_FW_RESET_MASK);
3672                if (status) {
3673                        dev_err(&adapter->pdev->dev,
3674                                "Adapter busy for FW reset.\n"
3675                                "New FW will not be active.\n");
3676                        goto lancer_fw_exit;
3677                }
3678        } else if (change_status != LANCER_NO_RESET_NEEDED) {
3679                dev_err(&adapter->pdev->dev,
3680                        "System reboot required for new FW to be active\n");
3682        }
3683
3684        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3685lancer_fw_exit:
3686        return status;
3687}
3688
3689#define UFI_TYPE2               2
3690#define UFI_TYPE3               3
3691#define UFI_TYPE3R              10
3692#define UFI_TYPE4               4
3693static int be_get_ufi_type(struct be_adapter *adapter,
3694                           struct flash_file_hdr_g3 *fhdr)
3695{
3696        if (fhdr == NULL)
3697                goto be_get_ufi_exit;
3698
3699        if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3700                return UFI_TYPE4;
3701        else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3702                if (fhdr->asic_type_rev == 0x10)
3703                        return UFI_TYPE3R;
3704                else
3705                        return UFI_TYPE3;
3706        } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3707                return UFI_TYPE2;
3708
3709be_get_ufi_exit:
3710        dev_err(&adapter->pdev->dev,
3711                "UFI and Interface are not compatible for flashing\n");
3712        return -1;
3713}
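    /* Example of the mapping above: a UFI with build[0] == '3' resolves to
     * UFI_TYPE3R on BE3-R hardware (asic_type_rev 0x10 in the file header)
     * and to UFI_TYPE3 on older BE3. Skyhawk accepts only '4' builds and
     * BE2 only '2' builds; any other combination is rejected with -1.
     */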
3714
3715static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3716{
3717        struct flash_file_hdr_g3 *fhdr3;
3718        struct image_hdr *img_hdr_ptr = NULL;
3719        struct be_dma_mem flash_cmd;
3720        const u8 *p;
3721        int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3722
3723        flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3724        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3725                                          &flash_cmd.dma, GFP_KERNEL);
3726        if (!flash_cmd.va) {
3727                status = -ENOMEM;
3728                goto be_fw_exit;
3729        }
3730
3731        p = fw->data;
3732        fhdr3 = (struct flash_file_hdr_g3 *)p;
3733
3734        ufi_type = be_get_ufi_type(adapter, fhdr3);
3735
3736        num_imgs = le32_to_cpu(fhdr3->num_imgs);
3737        for (i = 0; i < num_imgs; i++) {
3738                img_hdr_ptr = (struct image_hdr *)(fw->data +
3739                                (sizeof(struct flash_file_hdr_g3) +
3740                                 i * sizeof(struct image_hdr)));
3741                if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3742                        switch (ufi_type) {
3743                        case UFI_TYPE4:
3744                                status = be_flash_skyhawk(adapter, fw,
3745                                                        &flash_cmd, num_imgs);
3746                                break;
3747                        case UFI_TYPE3R:
3748                                status = be_flash_BEx(adapter, fw, &flash_cmd,
3749                                                      num_imgs);
3750                                break;
3751                        case UFI_TYPE3:
3752                                /* Do not flash this ufi on BE3-R cards */
3753                                if (adapter->asic_rev < 0x10) {
3754                                        status = be_flash_BEx(adapter, fw,
3755                                                              &flash_cmd,
3756                                                              num_imgs);
3757                                } else {
3758                                        status = -1;
3759                                        dev_err(&adapter->pdev->dev,
3760                                                "Can't load BE3 UFI on BE3R\n");
3761                                }
3762                        }
3763                }
3764        }
3765
3766        if (ufi_type == UFI_TYPE2)
3767                status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3768        else if (ufi_type == -1)
3769                status = -1;
3770
3771        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3772                          flash_cmd.dma);
3773        if (status) {
3774                dev_err(&adapter->pdev->dev, "Firmware load error\n");
3775                goto be_fw_exit;
3776        }
3777
3778        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3779
3780be_fw_exit:
3781        return status;
3782}
3783
3784int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3785{
3786        const struct firmware *fw;
3787        int status;
3788
3789        if (!netif_running(adapter->netdev)) {
3790                dev_err(&adapter->pdev->dev,
3791                        "Firmware load not allowed (interface is down)\n");
3792                return -1;
3793        }
3794
3795        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3796        if (status)
3797                goto fw_exit;
3798
3799        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3800
3801        if (lancer_chip(adapter))
3802                status = lancer_fw_download(adapter, fw);
3803        else
3804                status = be_fw_download(adapter, fw);
3805
3806        if (!status)
3807                be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3808                                  adapter->fw_on_flash);
3809
3810fw_exit:
3811        release_firmware(fw);
3812        return status;
3813}
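    /* be_load_fw() is reached from userspace via the ethtool flash
     * interface (a sketch of the usual path, assuming the standard ethtool
     * plumbing wires ETHTOOL_FLASHDEV to this driver's .flash_device op):
     *
     *	# ethtool -f eth0 <ufi-or-lancer-image>
     *
     * request_firmware() then resolves the file name under /lib/firmware.
     */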
3814
3815static int be_ndo_bridge_setlink(struct net_device *dev,
3816                                    struct nlmsghdr *nlh)
3817{
3818        struct be_adapter *adapter = netdev_priv(dev);
3819        struct nlattr *attr, *br_spec;
3820        int rem;
3821        int status = 0;
3822        u16 mode = 0;
3823
3824        if (!sriov_enabled(adapter))
3825                return -EOPNOTSUPP;
3826
3827        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
            if (!br_spec)
                    return -EINVAL;
3828
3829        nla_for_each_nested(attr, br_spec, rem) {
3830                if (nla_type(attr) != IFLA_BRIDGE_MODE)
3831                        continue;
3832
3833                mode = nla_get_u16(attr);
3834                if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3835                        return -EINVAL;
3836
3837                status = be_cmd_set_hsw_config(adapter, 0, 0,
3838                                               adapter->if_handle,
3839                                               mode == BRIDGE_MODE_VEPA ?
3840                                               PORT_FWD_TYPE_VEPA :
3841                                               PORT_FWD_TYPE_VEB);
3842                if (status)
3843                        goto err;
3844
3845                dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3846                         mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3847
3848                return status;
3849        }
3850err:
3851        dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3852                mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3853
3854        return status;
3855}
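    /* The setlink handler above is driven by an RTM_SETLINK message with
     * IFLA_BRIDGE_MODE nested inside IFLA_AF_SPEC, e.g. from iproute2:
     *
     *	# bridge link set dev eth0 hwmode vepa
     *
     * which switches the port to VEPA forwarding (frames reflected via the
     * adjacent switch) instead of the default internal VEB bridging.
     */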
3856
3857static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3858                                    struct net_device *dev,
3859                                    u32 filter_mask)
3860{
3861        struct be_adapter *adapter = netdev_priv(dev);
3862        int status = 0;
3863        u8 hsw_mode;
3864
3865        if (!sriov_enabled(adapter))
3866                return 0;
3867
3868        /* BE and Lancer chips support VEB mode only */
3869        if (BEx_chip(adapter) || lancer_chip(adapter)) {
3870                hsw_mode = PORT_FWD_TYPE_VEB;
3871        } else {
3872                status = be_cmd_get_hsw_config(adapter, NULL, 0,
3873                                               adapter->if_handle, &hsw_mode);
3874                if (status)
3875                        return 0;
3876        }
3877
3878        return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3879                                       hsw_mode == PORT_FWD_TYPE_VEPA ?
3880                                       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3881}
3882
3883static const struct net_device_ops be_netdev_ops = {
3884        .ndo_open               = be_open,
3885        .ndo_stop               = be_close,
3886        .ndo_start_xmit         = be_xmit,
3887        .ndo_set_rx_mode        = be_set_rx_mode,
3888        .ndo_set_mac_address    = be_mac_addr_set,
3889        .ndo_change_mtu         = be_change_mtu,
3890        .ndo_get_stats64        = be_get_stats64,
3891        .ndo_validate_addr      = eth_validate_addr,
3892        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3893        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3894        .ndo_set_vf_mac         = be_set_vf_mac,
3895        .ndo_set_vf_vlan        = be_set_vf_vlan,
3896        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3897        .ndo_get_vf_config      = be_get_vf_config,
3898#ifdef CONFIG_NET_POLL_CONTROLLER
3899        .ndo_poll_controller    = be_netpoll,
3900#endif
3901        .ndo_bridge_setlink     = be_ndo_bridge_setlink,
3902        .ndo_bridge_getlink     = be_ndo_bridge_getlink,
3903};
3904
3905static void be_netdev_init(struct net_device *netdev)
3906{
3907        struct be_adapter *adapter = netdev_priv(netdev);
3908
3909        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3910                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3911                NETIF_F_HW_VLAN_CTAG_TX;
3912        if (be_multi_rxq(adapter))
3913                netdev->hw_features |= NETIF_F_RXHASH;
3914
3915        netdev->features |= netdev->hw_features |
3916                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3917
3918        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3919                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3920
3921        netdev->priv_flags |= IFF_UNICAST_FLT;
3922
3923        netdev->flags |= IFF_MULTICAST;
3924
3925        netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3926
3927        netdev->netdev_ops = &be_netdev_ops;
3928
3929        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3930}
3931
3932static void be_unmap_pci_bars(struct be_adapter *adapter)
3933{
3934        if (adapter->csr)
3935                pci_iounmap(adapter->pdev, adapter->csr);
3936        if (adapter->db)
3937                pci_iounmap(adapter->pdev, adapter->db);
3938}
3939
3940static int db_bar(struct be_adapter *adapter)
3941{
3942        if (lancer_chip(adapter) || !be_physfn(adapter))
3943                return 0;
3944        else
3945                return 4;
3946}
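    /* BAR usage implied by db_bar() above and be_map_pci_bars() below
     * (summary only):
     *
     *	Lancer and all VFs:	doorbells on BAR 0
     *	BEx/Skyhawk PF:		doorbells on BAR 4
     *	BEx PF only:		CSR registers on BAR 2
     */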
3947
3948static int be_roce_map_pci_bars(struct be_adapter *adapter)
3949{
3950        if (skyhawk_chip(adapter)) {
3951                adapter->roce_db.size = 4096;
3952                adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3953                                                              db_bar(adapter));
3954                adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3955                                                               db_bar(adapter));
3956        }
3957        return 0;
3958}
3959
3960static int be_map_pci_bars(struct be_adapter *adapter)
3961{
3962        u8 __iomem *addr;
3963        u32 sli_intf;
3964
3965        pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3966        adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3967                                SLI_INTF_IF_TYPE_SHIFT;
3968
3969        if (BEx_chip(adapter) && be_physfn(adapter)) {
3970                adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3971                if (adapter->csr == NULL)
3972                        return -ENOMEM;
3973        }
3974
3975        addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3976        if (addr == NULL)
3977                goto pci_map_err;
3978        adapter->db = addr;
3979
3980        be_roce_map_pci_bars(adapter);
3981        return 0;
3982
3983pci_map_err:
3984        be_unmap_pci_bars(adapter);
3985        return -ENOMEM;
3986}
3987
3988static void be_ctrl_cleanup(struct be_adapter *adapter)
3989{
3990        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3991
3992        be_unmap_pci_bars(adapter);
3993
3994        if (mem->va)
3995                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3996                                  mem->dma);
3997
3998        mem = &adapter->rx_filter;
3999        if (mem->va)
4000                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4001                                  mem->dma);
4002}
4003
4004static int be_ctrl_init(struct be_adapter *adapter)
4005{
4006        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4007        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4008        struct be_dma_mem *rx_filter = &adapter->rx_filter;
4009        u32 sli_intf;
4010        int status;
4011
4012        pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4013        adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4014                                 SLI_INTF_FAMILY_SHIFT;
4015        adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4016
4017        status = be_map_pci_bars(adapter);
4018        if (status)
4019                goto done;
4020
4021        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4022        mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4023                                                mbox_mem_alloc->size,
4024                                                &mbox_mem_alloc->dma,
4025                                                GFP_KERNEL);
4026        if (!mbox_mem_alloc->va) {
4027                status = -ENOMEM;
4028                goto unmap_pci_bars;
4029        }
4030        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4031        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4032        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4033        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4034
4035        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4036        rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4037                                            rx_filter->size, &rx_filter->dma,
4038                                            GFP_KERNEL);
4039        if (rx_filter->va == NULL) {
4040                status = -ENOMEM;
4041                goto free_mbox;
4042        }
4043
4044        mutex_init(&adapter->mbox_lock);
4045        spin_lock_init(&adapter->mcc_lock);
4046        spin_lock_init(&adapter->mcc_cq_lock);
4047
4048        init_completion(&adapter->flash_compl);
4049        pci_save_state(adapter->pdev);
4050        return 0;
4051
4052free_mbox:
4053        dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4054                          mbox_mem_alloc->va, mbox_mem_alloc->dma);
4055
4056unmap_pci_bars:
4057        be_unmap_pci_bars(adapter);
4058
4059done:
4060        return status;
4061}
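    /* Mailbox alignment math used in be_ctrl_init(): the mailbox must sit
     * on a 16-byte boundary, so the allocation is padded by 16 bytes and
     * PTR_ALIGN() rounds both the CPU and DMA addresses up. For example,
     * if dma_alloc_coherent() returned a DMA address of 0x1008:
     *
     *	mbox_mem_align->dma = PTR_ALIGN(0x1008, 16) = 0x1010
     *
     * and the aligned mailbox still fits because size was padded by 16.
     */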
4062
4063static void be_stats_cleanup(struct be_adapter *adapter)
4064{
4065        struct be_dma_mem *cmd = &adapter->stats_cmd;
4066
4067        if (cmd->va)
4068                dma_free_coherent(&adapter->pdev->dev, cmd->size,
4069                                  cmd->va, cmd->dma);
4070}
4071
4072static int be_stats_init(struct be_adapter *adapter)
4073{
4074        struct be_dma_mem *cmd = &adapter->stats_cmd;
4075
4076        if (lancer_chip(adapter))
4077                cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4078        else if (BE2_chip(adapter))
4079                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4080        else
4081                /* BE3 and Skyhawk */
4082                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4083
4084        cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4085                                      GFP_KERNEL);
4086        if (cmd->va == NULL)
4087                return -ENOMEM;
4088        return 0;
4089}
4090
4091static void be_remove(struct pci_dev *pdev)
4092{
4093        struct be_adapter *adapter = pci_get_drvdata(pdev);
4094
4095        if (!adapter)
4096                return;
4097
4098        be_roce_dev_remove(adapter);
4099        be_intr_set(adapter, false);
4100
4101        cancel_delayed_work_sync(&adapter->func_recovery_work);
4102
4103        unregister_netdev(adapter->netdev);
4104
4105        be_clear(adapter);
4106
4107        /* tell fw we're done with firing cmds */
4108        be_cmd_fw_clean(adapter);
4109
4110        be_stats_cleanup(adapter);
4111
4112        be_ctrl_cleanup(adapter);
4113
4114        pci_disable_pcie_error_reporting(pdev);
4115
4116        pci_set_drvdata(pdev, NULL);
4117        pci_release_regions(pdev);
4118        pci_disable_device(pdev);
4119
4120        free_netdev(adapter->netdev);
4121}
4122
4123bool be_is_wol_supported(struct be_adapter *adapter)
4124{
4125        return (adapter->wol_cap & BE_WOL_CAP) &&
4126                !be_is_wol_excluded(adapter);
4127}
4128
4129u32 be_get_fw_log_level(struct be_adapter *adapter)
4130{
4131        struct be_dma_mem extfat_cmd;
4132        struct be_fat_conf_params *cfgs;
4133        int status;
4134        u32 level = 0;
4135        int j;
4136
4137        if (lancer_chip(adapter))
4138                return 0;
4139
4140        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4141        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4142        extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4143                                             &extfat_cmd.dma);
4144
4145        if (!extfat_cmd.va) {
4146                dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4147                        __func__);
4148                goto err;
4149        }
4150
4151        status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4152        if (!status) {
4153                cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4154                                                sizeof(struct be_cmd_resp_hdr));
4155                for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4156                        if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4157                                level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4158                }
4159        }
4160        pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4161                            extfat_cmd.dma);
4162err:
4163        return level;
4164}
4165
4166static int be_get_initial_config(struct be_adapter *adapter)
4167{
4168        int status;
4169        u32 level;
4170
4171        status = be_cmd_get_cntl_attributes(adapter);
4172        if (status)
4173                return status;
4174
4175        status = be_cmd_get_acpi_wol_cap(adapter);
4176        if (status) {
4177                /* in case of a failure to get wol capabilities
4178                 * check the exclusion list to determine WOL capability */
4179                if (!be_is_wol_excluded(adapter))
4180                        adapter->wol_cap |= BE_WOL_CAP;
4181        }
4182
4183        if (be_is_wol_supported(adapter))
4184                adapter->wol = true;
4185
4186        /* Must be a power of 2 or else MODULO will BUG_ON */
4187        adapter->be_get_temp_freq = 64;
4188
4189        level = be_get_fw_log_level(adapter);
4190        adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4191
4192        adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4193        return 0;
4194}
4195
4196static int lancer_recover_func(struct be_adapter *adapter)
4197{
4198        struct device *dev = &adapter->pdev->dev;
4199        int status;
4200
4201        status = lancer_test_and_set_rdy_state(adapter);
4202        if (status)
4203                goto err;
4204
4205        if (netif_running(adapter->netdev))
4206                be_close(adapter->netdev);
4207
4208        be_clear(adapter);
4209
4210        be_clear_all_error(adapter);
4211
4212        status = be_setup(adapter);
4213        if (status)
4214                goto err;
4215
4216        if (netif_running(adapter->netdev)) {
4217                status = be_open(adapter->netdev);
4218                if (status)
4219                        goto err;
4220        }
4221
4222        dev_err(dev, "Error recovery successful\n");
4223        return 0;
4224err:
4225        if (status == -EAGAIN)
4226                dev_err(dev, "Waiting for resource provisioning\n");
4227        else
4228                dev_err(dev, "Error recovery failed\n");
4229
4230        return status;
4231}
4232
4233static void be_func_recovery_task(struct work_struct *work)
4234{
4235        struct be_adapter *adapter =
4236                container_of(work, struct be_adapter, func_recovery_work.work);
4237        int status = 0;
4238
4239        be_detect_error(adapter);
4240
4241        if (adapter->hw_error && lancer_chip(adapter)) {
4243                rtnl_lock();
4244                netif_device_detach(adapter->netdev);
4245                rtnl_unlock();
4246
4247                status = lancer_recover_func(adapter);
4248                if (!status)
4249                        netif_device_attach(adapter->netdev);
4250        }
4251
4252        /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4253         * no need to attempt further recovery.
4254         */
4255        if (!status || status == -EAGAIN)
4256                schedule_delayed_work(&adapter->func_recovery_work,
4257                                      msecs_to_jiffies(1000));
4258}
4259
4260static void be_worker(struct work_struct *work)
4261{
4262        struct be_adapter *adapter =
4263                container_of(work, struct be_adapter, work.work);
4264        struct be_rx_obj *rxo;
4265        struct be_eq_obj *eqo;
4266        int i;
4267
4268        /* when interrupts are not yet enabled, just reap any pending
4269         * mcc completions */
4270        if (!netif_running(adapter->netdev)) {
4271                local_bh_disable();
4272                be_process_mcc(adapter);
4273                local_bh_enable();
4274                goto reschedule;
4275        }
4276
4277        if (!adapter->stats_cmd_sent) {
4278                if (lancer_chip(adapter))
4279                        lancer_cmd_get_pport_stats(adapter,
4280                                                &adapter->stats_cmd);
4281                else
4282                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
4283        }
4284
4285        if (be_physfn(adapter) &&
4286            MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4287                be_cmd_get_die_temperature(adapter);
4288
4289        for_all_rx_queues(adapter, rxo, i) {
4290                if (rxo->rx_post_starved) {
4291                        rxo->rx_post_starved = false;
4292                        be_post_rx_frags(rxo, GFP_KERNEL);
4293                }
4294        }
4295
4296        for_all_evt_queues(adapter, eqo, i)
4297                be_eqd_update(adapter, eqo);
4298
4299reschedule:
4300        adapter->work_counter++;
4301        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4302}
4303
4304/* If any VFs are already enabled, don't FLR the PF */
4305static bool be_reset_required(struct be_adapter *adapter)
4306{
4307        return pci_num_vf(adapter->pdev) == 0;
4308}
4309
4310static char *mc_name(struct be_adapter *adapter)
4311{
4312        if (adapter->function_mode & FLEX10_MODE)
4313                return "FLEX10";
4314        else if (adapter->function_mode & VNIC_MODE)
4315                return "vNIC";
4316        else if (adapter->function_mode & UMC_ENABLED)
4317                return "UMC";
4318        else
4319                return "";
4320}
4321
4322static inline char *func_name(struct be_adapter *adapter)
4323{
4324        return be_physfn(adapter) ? "PF" : "VF";
4325}
4326
4327static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4328{
4329        int status = 0;
4330        struct be_adapter *adapter;
4331        struct net_device *netdev;
4332        char port_name;
4333
4334        status = pci_enable_device(pdev);
4335        if (status)
4336                goto do_none;
4337
4338        status = pci_request_regions(pdev, DRV_NAME);
4339        if (status)
4340                goto disable_dev;
4341        pci_set_master(pdev);
4342
4343        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4344        if (!netdev) {
4345                status = -ENOMEM;
4346                goto rel_reg;
4347        }
4348        adapter = netdev_priv(netdev);
4349        adapter->pdev = pdev;
4350        pci_set_drvdata(pdev, adapter);
4351        adapter->netdev = netdev;
4352        SET_NETDEV_DEV(netdev, &pdev->dev);
4353
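            /* Prefer 64-bit DMA and fall back to a 32-bit mask if the
             * platform can't support it; NETIF_F_HIGHDMA is set only
             * when the full 64-bit mask succeeds.
             */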
4354        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4355        if (!status) {
4356                status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4357                if (status < 0) {
4358                        dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4359                        goto free_netdev;
4360                }
4361                netdev->features |= NETIF_F_HIGHDMA;
4362        } else {
4363                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4364                if (!status)
4365                        status = dma_set_coherent_mask(&pdev->dev,
4366                                                       DMA_BIT_MASK(32));
4367                if (status) {
4368                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4369                        goto free_netdev;
4370                }
4371        }
4372
4373        status = pci_enable_pcie_error_reporting(pdev);
4374        if (status)
4375                dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
4376
4377        status = be_ctrl_init(adapter);
4378        if (status)
4379                goto free_netdev;
4380
4381        /* sync up with fw's ready state */
4382        if (be_physfn(adapter)) {
4383                status = be_fw_wait_ready(adapter);
4384                if (status)
4385                        goto ctrl_clean;
4386        }
4387
4388        if (be_reset_required(adapter)) {
4389                status = be_cmd_reset_function(adapter);
4390                if (status)
4391                        goto ctrl_clean;
4392
4393                /* Wait for interrupts to quiesce after an FLR */
4394                msleep(100);
4395        }
4396
4397        /* Allow interrupts for other ULPs running on the NIC function */
4398        be_intr_set(adapter, true);
4399
4400        /* tell fw we're ready to fire cmds */
4401        status = be_cmd_fw_init(adapter);
4402        if (status)
4403                goto ctrl_clean;
4404
4405        status = be_stats_init(adapter);
4406        if (status)
4407                goto ctrl_clean;
4408
4409        status = be_get_initial_config(adapter);
4410        if (status)
4411                goto stats_clean;
4412
4413        INIT_DELAYED_WORK(&adapter->work, be_worker);
4414        INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4415        adapter->rx_fc = adapter->tx_fc = true;
4416
4417        status = be_setup(adapter);
4418        if (status)
4419                goto stats_clean;
4420
4421        be_netdev_init(netdev);
4422        status = register_netdev(netdev);
4423        if (status)
4424                goto unsetup;
4425
4426        be_roce_dev_add(adapter);
4427
4428        schedule_delayed_work(&adapter->func_recovery_work,
4429                              msecs_to_jiffies(1000));
4430
4431        be_cmd_query_port_name(adapter, &port_name);
4432
4433        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4434                 func_name(adapter), mc_name(adapter), port_name);
4435
4436        return 0;
4437
4438unsetup:
4439        be_clear(adapter);
4440stats_clean:
4441        be_stats_cleanup(adapter);
4442ctrl_clean:
4443        be_ctrl_cleanup(adapter);
4444free_netdev:
4445        free_netdev(netdev);
4446        pci_set_drvdata(pdev, NULL);
4447rel_reg:
4448        pci_release_regions(pdev);
4449disable_dev:
4450        pci_disable_device(pdev);
4451do_none:
4452        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4453        return status;
4454}
4455
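    /* Legacy PCI PM suspend: arm WoL if configured, stop the
     * recovery worker, detach and close the netdev, free resources
     * and move the device to the requested low-power state.
     */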
4456static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4457{
4458        struct be_adapter *adapter = pci_get_drvdata(pdev);
4459        struct net_device *netdev = adapter->netdev;
4460
4461        if (adapter->wol)
4462                be_setup_wol(adapter, true);
4463
4464        cancel_delayed_work_sync(&adapter->func_recovery_work);
4465
4466        netif_device_detach(netdev);
4467        if (netif_running(netdev)) {
4468                rtnl_lock();
4469                be_close(netdev);
4470                rtnl_unlock();
4471        }
4472        be_clear(adapter);
4473
4474        pci_save_state(pdev);
4475        pci_disable_device(pdev);
4476        pci_set_power_state(pdev, pci_choose_state(pdev, state));
4477        return 0;
4478}
4479
4480static int be_resume(struct pci_dev *pdev)
4481{
4482        int status = 0;
4483        struct be_adapter *adapter = pci_get_drvdata(pdev);
4484        struct net_device *netdev = adapter->netdev;
4485
4486        netif_device_detach(netdev);
4487
4488        status = pci_enable_device(pdev);
4489        if (status)
4490                return status;
4491
4492        pci_set_power_state(pdev, PCI_D0);
4493        pci_restore_state(pdev);
4494
4495        status = be_fw_wait_ready(adapter);
4496        if (status)
4497                return status;
4498
4499        /* tell fw we're ready to fire cmds */
4500        status = be_cmd_fw_init(adapter);
4501        if (status)
4502                return status;
4503
4504        status = be_setup(adapter);
            if (status)
                    return status;
4505        if (netif_running(netdev)) {
4506                rtnl_lock();
4507                be_open(netdev);
4508                rtnl_unlock();
4509        }
4510
4511        schedule_delayed_work(&adapter->func_recovery_work,
4512                              msecs_to_jiffies(1000));
4513        netif_device_attach(netdev);
4514
4515        if (adapter->wol)
4516                be_setup_wol(adapter, false);
4517
4518        return 0;
4519}
4520
4521/* A function reset (FLR) stops BE from DMAing any data; reset on
4522 * shutdown so the device is fully quiesced.
4523 */
4524static void be_shutdown(struct pci_dev *pdev)
4525{
4526        struct be_adapter *adapter = pci_get_drvdata(pdev);
4527
4528        if (!adapter)
4529                return;
4530
4531        cancel_delayed_work_sync(&adapter->work);
4532        cancel_delayed_work_sync(&adapter->func_recovery_work);
4533
4534        netif_device_detach(adapter->netdev);
4535
4536        be_cmd_reset_function(adapter);
4537
4538        pci_disable_device(pdev);
4539}
4540
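    /* EEH (PCI error) recovery: error_detected() quiesces the
     * driver, slot_reset() re-enables the device and waits for the
     * FW to become ready, and resume() re-creates the queues and
     * re-attaches the netdev.
     */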
4541static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4542                                            pci_channel_state_t state)
4543{
4544        struct be_adapter *adapter = pci_get_drvdata(pdev);
4545        struct net_device *netdev = adapter->netdev;
4546
4547        dev_err(&adapter->pdev->dev, "EEH error detected\n");
4548
4549        if (!adapter->eeh_error) {
4550                adapter->eeh_error = true;
4551
4552                cancel_delayed_work_sync(&adapter->func_recovery_work);
4553
4554                rtnl_lock();
4555                netif_device_detach(netdev);
4556                if (netif_running(netdev))
4557                        be_close(netdev);
4558                rtnl_unlock();
4559
4560                be_clear(adapter);
4561        }
4562
4563        if (state == pci_channel_io_perm_failure)
4564                return PCI_ERS_RESULT_DISCONNECT;
4565
4566        pci_disable_device(pdev);
4567
4568        /* The error could cause the FW to trigger a flash debug dump.
4569         * Resetting the card while a flash dump is in progress can
4570         * prevent it from recovering, so wait for the dump to finish.
4571         * Only the first function needs to wait, since one wait per
4572         * adapter is enough.
4573         */
4574        if (pdev->devfn == 0)
4575                ssleep(30);
4576
4577        return PCI_ERS_RESULT_NEED_RESET;
4578}
4579
4580static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4581{
4582        struct be_adapter *adapter = pci_get_drvdata(pdev);
4583        int status;
4584
4585        dev_info(&adapter->pdev->dev, "EEH reset\n");
4586
4587        status = pci_enable_device(pdev);
4588        if (status)
4589                return PCI_ERS_RESULT_DISCONNECT;
4590
4591        pci_set_master(pdev);
4592        pci_set_power_state(pdev, PCI_D0);
4593        pci_restore_state(pdev);
4594
4595        /* Check if the card is OK and the FW is ready */
4596        dev_info(&adapter->pdev->dev,
4597                 "Waiting for FW to be ready after EEH reset\n");
4598        status = be_fw_wait_ready(adapter);
4599        if (status)
4600                return PCI_ERS_RESULT_DISCONNECT;
4601
4602        pci_cleanup_aer_uncorrect_error_status(pdev);
4603        be_clear_all_error(adapter);
4604        return PCI_ERS_RESULT_RECOVERED;
4605}
4606
4607static void be_eeh_resume(struct pci_dev *pdev)
4608{
4609        int status = 0;
4610        struct be_adapter *adapter = pci_get_drvdata(pdev);
4611        struct net_device *netdev = adapter->netdev;
4612
4613        dev_info(&adapter->pdev->dev, "EEH resume\n");
4614
4615        pci_save_state(pdev);
4616
4617        status = be_cmd_reset_function(adapter);
4618        if (status)
4619                goto err;
4620
4621        /* tell fw we're ready to fire cmds */
4622        status = be_cmd_fw_init(adapter);
4623        if (status)
4624                goto err;
4625
4626        status = be_setup(adapter);
4627        if (status)
4628                goto err;
4629
4630        if (netif_running(netdev)) {
4631                status = be_open(netdev);
4632                if (status)
4633                        goto err;
4634        }
4635
4636        schedule_delayed_work(&adapter->func_recovery_work,
4637                              msecs_to_jiffies(1000));
4638        netif_device_attach(netdev);
4639        return;
4640err:
4641        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4642}
4643
4644static const struct pci_error_handlers be_eeh_handlers = {
4645        .error_detected = be_eeh_err_detected,
4646        .slot_reset = be_eeh_reset,
4647        .resume = be_eeh_resume,
4648};
4649
4650static struct pci_driver be_driver = {
4651        .name = DRV_NAME,
4652        .id_table = be_dev_ids,
4653        .probe = be_probe,
4654        .remove = be_remove,
4655        .suspend = be_suspend,
4656        .resume = be_resume,
4657        .shutdown = be_shutdown,
4658        .err_handler = &be_eeh_handlers
4659};
4660
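    /* Module entry point: sanitize rx_frag_size and register the
     * PCI driver. Hypothetical usage, assuming the module is named
     * be2net (per DRV_NAME):
     *
     *   modprobe be2net rx_frag_size=4096
     *
     * Any other rx_frag_size falls back to 2048 with a warning.
     */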
4661static int __init be_init_module(void)
4662{
4663        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4664            rx_frag_size != 2048) {
4665                pr_warn(DRV_NAME " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
4668                rx_frag_size = 2048;
4669        }
4670
4671        return pci_register_driver(&be_driver);
4672}
4673module_init(be_init_module);
4674
4675static void __exit be_exit_module(void)
4676{
4677        pci_unregister_driver(&be_driver);
4678}
4679module_exit(be_exit_module);
4680