linux/drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

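/* Allocate the DMA-coherent backing memory for a HW queue (ring) of
 * 'len' entries of 'entry_size' bytes each; __GFP_ZERO ensures the
 * descriptor memory is cleared before the HW ever looks at it.
 */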
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

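/* Enable/disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register in PCI config space; used by
 * be_intr_set() below as a fallback when the FW command fails.
 */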
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On lancer interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

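/* Ring the event-queue doorbell. The doorbell word encodes the ring
 * id, the number of events popped, and the optional re-arm and
 * clear-interrupt flags; the write is skipped under an EEH error.
 */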
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

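/* ndo_set_mac_address handler. Note the ordering: the new MAC is
 * added with be_cmd_pmac_add() before the old pmac entry is deleted,
 * so the interface is never left without an active MAC filter.
 */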
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF.
         * Hence the only operation left is updating netdev->dev_addr.
         * Update it only if the user is passing the same MAC that was
         * used while configuring the VF MAC from the PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

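/* Fold a 16-bit HW counter into a 32-bit SW accumulator: a reading
 * smaller than the accumulator's low 16 bits means the HW counter
 * wrapped past 65535, so 65536 is added to compensate.
 */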
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   ((x) & 0xFFFF)
#define hi(x)                   ((x) & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* this erx HW counter wraps around after 65535;
                 * the driver accumulates it into a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

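/* ndo_get_stats64 handler. Per-queue 64-bit counters are sampled
 * inside u64_stats fetch/retry loops so that a consistent snapshot
 * is obtained even on 32-bit hosts, then summed into the
 * rtnl_link_stats64 struct supplied by the stack.
 */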
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

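/* Build the WRB chain for an skb in the TX ring: fragment WRBs for
 * the DMA-mapped linear head and page frags, an optional dummy WRB
 * to keep the count even, and finally the header WRB reserved up
 * front. On a DMA mapping error the ring head is rewound and all
 * mappings made so far are undone.
 */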
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

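/* Insert the VLAN tag (and the outer QnQ tag, if configured) into
 * the packet data itself rather than letting the HW tag it; called
 * from be_xmit_workarounds() below. May set *skip_hw_vlan so the
 * header WRB is built with HW tagging disabled.
 */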
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround: setting skip_hw_vlan = 1 informs the F/W
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
         * may cause a transmit stall on that port. So the work-around is to
         * pad such packets to a 36-byte length.
         */
        if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         * For padded packets, Lancer computes incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

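/* Transmit entry point (ndo_start_xmit). The queue is stopped
 * *before* the doorbell is rung whenever fewer than
 * BE_MAX_TX_FRAG_COUNT entries remain, so the completion path can
 * wake the queue without racing against this function.
 */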
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb)
                return NETDEV_TX_OK;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

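/* ndo_set_rx_mode handler. Programs the RX filters in order of
 * precedence: promiscuous mode, multicast-promiscuous fallback when
 * the mc list exceeds the HW limit, the unicast MAC list (slot 0 is
 * reserved for the primary MAC), and finally the multicast list.
 */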
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

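/* Adaptive interrupt coalescing: once per second derive the EQ delay
 * from the measured RX packet rate (higher pps => larger delay),
 * clamp it to [min_eqd, max_eqd], and ask FW to modify the EQ only
 * if the value actually changed.
 */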
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) /
                        (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

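/* Look up the page_info for an RX frag. RX buffers are carved out of
 * large pages that are DMA-mapped once; the mapping is torn down
 * only when the last user of the page consumes its fragment.
 */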
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

1458/* Process the RX completion indicated by rxcp when GRO is disabled */
1459static void be_rx_compl_process(struct be_rx_obj *rxo,
1460                                struct be_rx_compl_info *rxcp)
1461{
1462        struct be_adapter *adapter = rxo->adapter;
1463        struct net_device *netdev = adapter->netdev;
1464        struct sk_buff *skb;
1465
1466        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1467        if (unlikely(!skb)) {
1468                rx_stats(rxo)->rx_drops_no_skbs++;
1469                be_rx_compl_discard(rxo, rxcp);
1470                return;
1471        }
1472
1473        skb_fill_rx_data(rxo, skb, rxcp);
1474
1475        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1476                skb->ip_summed = CHECKSUM_UNNECESSARY;
1477        else
1478                skb_checksum_none_assert(skb);
1479
1480        skb->protocol = eth_type_trans(skb, netdev);
1481        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1482        if (netdev->features & NETIF_F_RXHASH)
1483                skb->rxhash = rxcp->rss_hash;
1484
1486        if (rxcp->vlanf)
1487                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1488
1489        netif_receive_skb(skb);
1490}
1491
1492/* Process the RX completion indicated by rxcp when GRO is enabled */
1493void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1494                             struct be_rx_compl_info *rxcp)
1495{
1496        struct be_adapter *adapter = rxo->adapter;
1497        struct be_rx_page_info *page_info;
1498        struct sk_buff *skb = NULL;
1499        struct be_queue_info *rxq = &rxo->q;
1500        u16 remaining, curr_frag_len;
1501        u16 i, j;
1502
1503        skb = napi_get_frags(napi);
1504        if (!skb) {
1505                be_rx_compl_discard(rxo, rxcp);
1506                return;
1507        }
1508
1509        remaining = rxcp->pkt_size;
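            /* j is u16; starting at -1 (0xffff) lets the first iteration's
             * j++ wrap around to frag slot 0
             */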
1510        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1511                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1512
1513                curr_frag_len = min(remaining, rx_frag_size);
1514
1515                /* Coalesce all frags from the same physical page in one slot */
1516                if (i == 0 || page_info->page_offset == 0) {
1517                        /* First frag or Fresh page */
1518                        j++;
1519                        skb_frag_set_page(skb, j, page_info->page);
1520                        skb_shinfo(skb)->frags[j].page_offset =
1521                                                        page_info->page_offset;
1522                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1523                } else {
1524                        put_page(page_info->page);
1525                }
1526                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1527                skb->truesize += rx_frag_size;
1528                remaining -= curr_frag_len;
1529                index_inc(&rxcp->rxq_idx, rxq->len);
1530                memset(page_info, 0, sizeof(*page_info));
1531        }
1532        BUG_ON(j > MAX_SKB_FRAGS);
1533
1534        skb_shinfo(skb)->nr_frags = j + 1;
1535        skb->len = rxcp->pkt_size;
1536        skb->data_len = rxcp->pkt_size;
1537        skb->ip_summed = CHECKSUM_UNNECESSARY;
1538        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1539        if (adapter->netdev->features & NETIF_F_RXHASH)
1540                skb->rxhash = rxcp->rss_hash;
1541
1542        if (rxcp->vlanf)
1543                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1544
1545        napi_gro_frags(napi);
1546}
1547
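    /* Parse a v1 (BE3 native mode) RX completion into rxcp */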
1548static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1549                                 struct be_rx_compl_info *rxcp)
1550{
1551        rxcp->pkt_size =
1552                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1553        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1554        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1555        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1556        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1557        rxcp->ip_csum =
1558                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1559        rxcp->l4_csum =
1560                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1561        rxcp->ipv6 =
1562                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1563        rxcp->rxq_idx =
1564                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1565        rxcp->num_rcvd =
1566                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1567        rxcp->pkt_type =
1568                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1569        rxcp->rss_hash =
1570                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1571        if (rxcp->vlanf) {
1572                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1573                                          compl);
1574                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1575                                               compl);
1576        }
1577        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1578}
1579
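    /* Parse a v0 (legacy format) RX completion into rxcp */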
1580static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1581                                 struct be_rx_compl_info *rxcp)
1582{
1583        rxcp->pkt_size =
1584                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1585        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1586        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1587        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1588        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1589        rxcp->ip_csum =
1590                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1591        rxcp->l4_csum =
1592                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1593        rxcp->ipv6 =
1594                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1595        rxcp->rxq_idx =
1596                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1597        rxcp->num_rcvd =
1598                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1599        rxcp->pkt_type =
1600                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1601        rxcp->rss_hash =
1602                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1603        if (rxcp->vlanf) {
1604                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1605                                          compl);
1606                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1607                                               compl);
1608        }
1609        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1610        rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1611                                      ip_frag, compl);
1612}
1613
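    /* Return the next valid, parsed RX completion or NULL if the CQ is
     * empty; a returned completion is consumed: its valid bit is cleared
     * and the CQ tail is advanced
     */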
1614static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1615{
1616        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1617        struct be_rx_compl_info *rxcp = &rxo->rxcp;
1618        struct be_adapter *adapter = rxo->adapter;
1619
1620        /* For checking the valid bit it is Ok to use either definition as the
1621         * valid bit is at the same position in both v0 and v1 Rx compl */
1622        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1623                return NULL;
1624
1625        rmb();
1626        be_dws_le_to_cpu(compl, sizeof(*compl));
1627
1628        if (adapter->be3_native)
1629                be_parse_rx_compl_v1(compl, rxcp);
1630        else
1631                be_parse_rx_compl_v0(compl, rxcp);
1632
1633        if (rxcp->ip_frag)
1634                rxcp->l4_csum = 0;
1635
1636        if (rxcp->vlanf) {
1637                /* vlanf could be wrongly set in some cards.
1638                 * ignore if vtm is not set */
1639                if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1640                        rxcp->vlanf = 0;
1641
1642                if (!lancer_chip(adapter))
1643                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1644
1645                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1646                    !adapter->vlan_tag[rxcp->vlan_tag])
1647                        rxcp->vlanf = 0;
1648        }
1649
1650        /* As the compl has been parsed, reset it; we won't touch it again */
1651        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1652
1653        queue_tail_inc(&rxo->cq);
1654        return rxcp;
1655}
1656
1657static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1658{
1659        u32 order = get_order(size);
1660
1661        if (order > 0)
1662                gfp |= __GFP_COMP;
1663        return  alloc_pages(gfp, order);
1664}
1665
1666/*
1667 * Allocate a page, split it into fragments of size rx_frag_size and post as
1668 * receive buffers to BE
1669 */
1670static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1671{
1672        struct be_adapter *adapter = rxo->adapter;
1673        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1674        struct be_queue_info *rxq = &rxo->q;
1675        struct page *pagep = NULL;
1676        struct be_eth_rx_d *rxd;
1677        u64 page_dmaaddr = 0, frag_dmaaddr;
1678        u32 posted, page_offset = 0;
1679
1680        page_info = &rxo->page_info_tbl[rxq->head];
1681        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1682                if (!pagep) {
1683                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
1684                        if (unlikely(!pagep)) {
1685                                rx_stats(rxo)->rx_post_fail++;
1686                                break;
1687                        }
1688                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1689                                                    0, adapter->big_page_size,
1690                                                    DMA_FROM_DEVICE);
1691                        page_info->page_offset = 0;
1692                } else {
1693                        get_page(pagep);
1694                        page_info->page_offset = page_offset + rx_frag_size;
1695                }
1696                page_offset = page_info->page_offset;
1697                page_info->page = pagep;
1698                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1699                frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1700
1701                rxd = queue_head_node(rxq);
1702                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1703                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1704
1705                /* Any space left in the current big page for another frag? */
1706                if ((page_offset + rx_frag_size + rx_frag_size) >
1707                                        adapter->big_page_size) {
1708                        pagep = NULL;
1709                        page_info->last_page_user = true;
1710                }
1711
1712                prev_page_info = page_info;
1713                queue_head_inc(rxq);
1714                page_info = &rxo->page_info_tbl[rxq->head];
1715        }
1716        if (pagep)
1717                prev_page_info->last_page_user = true;
1718
1719        if (posted) {
1720                atomic_add(posted, &rxq->used);
1721                be_rxq_notify(adapter, rxq->id, posted);
1722        } else if (atomic_read(&rxq->used) == 0) {
1723                /* Let be_worker replenish when memory is available */
1724                rxo->rx_post_starved = true;
1725        }
1726}
1727
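    /* Return the next valid TX completion or NULL if the TX CQ is empty */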
1728static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1729{
1730        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1731
1732        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1733                return NULL;
1734
1735        rmb();
1736        be_dws_le_to_cpu(txcp, sizeof(*txcp));
1737
1738        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1739
1740        queue_tail_inc(tx_cq);
1741        return txcp;
1742}
1743
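    /* Unmap and free the skb of the TX request that ends at last_index;
     * returns the number of WRBs reclaimed, including the header WRB
     */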
1744static u16 be_tx_compl_process(struct be_adapter *adapter,
1745                struct be_tx_obj *txo, u16 last_index)
1746{
1747        struct be_queue_info *txq = &txo->q;
1748        struct be_eth_wrb *wrb;
1749        struct sk_buff **sent_skbs = txo->sent_skb_list;
1750        struct sk_buff *sent_skb;
1751        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1752        bool unmap_skb_hdr = true;
1753
1754        sent_skb = sent_skbs[txq->tail];
1755        BUG_ON(!sent_skb);
1756        sent_skbs[txq->tail] = NULL;
1757
1758        /* skip header wrb */
1759        queue_tail_inc(txq);
1760
1761        do {
1762                cur_index = txq->tail;
1763                wrb = queue_tail_node(txq);
1764                unmap_tx_frag(&adapter->pdev->dev, wrb,
1765                              (unmap_skb_hdr && skb_headlen(sent_skb)));
1766                unmap_skb_hdr = false;
1767
1768                num_wrbs++;
1769                queue_tail_inc(txq);
1770        } while (cur_index != last_index);
1771
1772        kfree_skb(sent_skb);
1773        return num_wrbs;
1774}
1775
1776/* Return the number of events in the event queue */
1777static inline int events_get(struct be_eq_obj *eqo)
1778{
1779        struct be_eq_entry *eqe;
1780        int num = 0;
1781
1782        do {
1783                eqe = queue_tail_node(&eqo->q);
1784                if (eqe->evt == 0)
1785                        break;
1786
1787                rmb();
1788                eqe->evt = 0;
1789                num++;
1790                queue_tail_inc(&eqo->q);
1791        } while (true);
1792
1793        return num;
1794}
1795
1796/* Leaves the EQ in disarmed state */
1797static void be_eq_clean(struct be_eq_obj *eqo)
1798{
1799        int num = events_get(eqo);
1800
1801        be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1802}
1803
1804static void be_rx_cq_clean(struct be_rx_obj *rxo)
1805{
1806        struct be_rx_page_info *page_info;
1807        struct be_queue_info *rxq = &rxo->q;
1808        struct be_queue_info *rx_cq = &rxo->cq;
1809        struct be_rx_compl_info *rxcp;
1810        struct be_adapter *adapter = rxo->adapter;
1811        int flush_wait = 0;
1812        u16 tail;
1813
1814        /* Consume pending rx completions.
1815         * Wait for the flush completion (identified by zero num_rcvd)
1816         * to arrive. Notify CQ even when there are no more CQ entries
1817         * for HW to flush partially coalesced CQ entries.
1818         * In Lancer, there is no need to wait for flush compl.
1819         */
1820        for (;;) {
1821                rxcp = be_rx_compl_get(rxo);
1822                if (rxcp == NULL) {
1823                        if (lancer_chip(adapter))
1824                                break;
1825
1826                        if (flush_wait++ > 10 || be_hw_error(adapter)) {
1827                                dev_warn(&adapter->pdev->dev,
1828                                         "did not receive flush compl\n");
1829                                break;
1830                        }
1831                        be_cq_notify(adapter, rx_cq->id, true, 0);
1832                        mdelay(1);
1833                } else {
1834                        be_rx_compl_discard(rxo, rxcp);
1835                        be_cq_notify(adapter, rx_cq->id, false, 1);
1836                        if (rxcp->num_rcvd == 0)
1837                                break;
1838                }
1839        }
1840
1841        /* After cleanup, leave the CQ in unarmed state */
1842        be_cq_notify(adapter, rx_cq->id, false, 0);
1843
1844        /* Then free posted rx buffers that were not used */
1845        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1846        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1847                page_info = get_rx_page_info(rxo, tail);
1848                put_page(page_info->page);
1849                memset(page_info, 0, sizeof(*page_info));
1850        }
1851        BUG_ON(atomic_read(&rxq->used));
1852        rxq->tail = rxq->head = 0;
1853}
1854
1855static void be_tx_compl_clean(struct be_adapter *adapter)
1856{
1857        struct be_tx_obj *txo;
1858        struct be_queue_info *txq;
1859        struct be_eth_tx_compl *txcp;
1860        u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1861        struct sk_buff *sent_skb;
1862        bool dummy_wrb;
1863        int i, pending_txqs;
1864
1865        /* Wait for a max of 200ms for all the tx-completions to arrive. */
1866        do {
1867                pending_txqs = adapter->num_tx_qs;
1868
1869                for_all_tx_queues(adapter, txo, i) {
1870                        txq = &txo->q;
1871                        while ((txcp = be_tx_compl_get(&txo->cq))) {
1872                                end_idx =
1873                                        AMAP_GET_BITS(struct amap_eth_tx_compl,
1874                                                      wrb_index, txcp);
1875                                num_wrbs += be_tx_compl_process(adapter, txo,
1876                                                                end_idx);
1877                                cmpl++;
1878                        }
1879                        if (cmpl) {
1880                                be_cq_notify(adapter, txo->cq.id, false, cmpl);
1881                                atomic_sub(num_wrbs, &txq->used);
1882                                cmpl = 0;
1883                                num_wrbs = 0;
1884                        }
1885                        if (atomic_read(&txq->used) == 0)
1886                                pending_txqs--;
1887                }
1888
1889                if (pending_txqs == 0 || ++timeo > 200)
1890                        break;
1891
1892                mdelay(1);
1893        } while (true);
1894
1895        for_all_tx_queues(adapter, txo, i) {
1896                txq = &txo->q;
1897                if (atomic_read(&txq->used))
1898                        dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1899                                atomic_read(&txq->used));
1900
1901                /* free posted tx for which compls will never arrive */
1902                while (atomic_read(&txq->used)) {
1903                        sent_skb = txo->sent_skb_list[txq->tail];
1904                        end_idx = txq->tail;
1905                        num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1906                                                   &dummy_wrb);
1907                        index_adv(&end_idx, num_wrbs - 1, txq->len);
1908                        num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1909                        atomic_sub(num_wrbs, &txq->used);
1910                }
1911        }
1912}
1913
1914static void be_evt_queues_destroy(struct be_adapter *adapter)
1915{
1916        struct be_eq_obj *eqo;
1917        int i;
1918
1919        for_all_evt_queues(adapter, eqo, i) {
1920                if (eqo->q.created) {
1921                        be_eq_clean(eqo);
1922                        be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1923                }
1924                be_queue_free(adapter, &eqo->q);
1925        }
1926}
1927
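    /* Create one EQ (with adaptive interrupt coalescing) per irq vector */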
1928static int be_evt_queues_create(struct be_adapter *adapter)
1929{
1930        struct be_queue_info *eq;
1931        struct be_eq_obj *eqo;
1932        int i, rc;
1933
1934        adapter->num_evt_qs = num_irqs(adapter);
1935
1936        for_all_evt_queues(adapter, eqo, i) {
1937                eqo->adapter = adapter;
1938                eqo->tx_budget = BE_TX_BUDGET;
1939                eqo->idx = i;
1940                eqo->max_eqd = BE_MAX_EQD;
1941                eqo->enable_aic = true;
1942
1943                eq = &eqo->q;
1944                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1945                                        sizeof(struct be_eq_entry));
1946                if (rc)
1947                        return rc;
1948
1949                rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1950                if (rc)
1951                        return rc;
1952        }
1953        return 0;
1954}
1955
1956static void be_mcc_queues_destroy(struct be_adapter *adapter)
1957{
1958        struct be_queue_info *q;
1959
1960        q = &adapter->mcc_obj.q;
1961        if (q->created)
1962                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1963        be_queue_free(adapter, q);
1964
1965        q = &adapter->mcc_obj.cq;
1966        if (q->created)
1967                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1968        be_queue_free(adapter, q);
1969}
1970
1971/* Must be called only after TX qs are created as MCC shares TX EQ */
1972static int be_mcc_queues_create(struct be_adapter *adapter)
1973{
1974        struct be_queue_info *q, *cq;
1975
1976        cq = &adapter->mcc_obj.cq;
1977        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1978                        sizeof(struct be_mcc_compl)))
1979                goto err;
1980
1981        /* Use the default EQ for MCC completions */
1982        if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1983                goto mcc_cq_free;
1984
1985        q = &adapter->mcc_obj.q;
1986        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1987                goto mcc_cq_destroy;
1988
1989        if (be_cmd_mccq_create(adapter, q, cq))
1990                goto mcc_q_free;
1991
1992        return 0;
1993
1994mcc_q_free:
1995        be_queue_free(adapter, q);
1996mcc_cq_destroy:
1997        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1998mcc_cq_free:
1999        be_queue_free(adapter, cq);
2000err:
2001        return -1;
2002}
2003
2004static void be_tx_queues_destroy(struct be_adapter *adapter)
2005{
2006        struct be_queue_info *q;
2007        struct be_tx_obj *txo;
2008        u8 i;
2009
2010        for_all_tx_queues(adapter, txo, i) {
2011                q = &txo->q;
2012                if (q->created)
2013                        be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2014                be_queue_free(adapter, q);
2015
2016                q = &txo->cq;
2017                if (q->created)
2018                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2019                be_queue_free(adapter, q);
2020        }
2021}
2022
2023static int be_num_txqs_want(struct be_adapter *adapter)
2024{
2025        if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2026            be_is_mc(adapter) ||
2027            (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2028            BE2_chip(adapter))
2029                return 1;
2030        else
2031                return adapter->max_tx_queues;
2032}
2033
2034static int be_tx_cqs_create(struct be_adapter *adapter)
2035{
2036        struct be_queue_info *cq, *eq;
2037        int status;
2038        struct be_tx_obj *txo;
2039        u8 i;
2040
2041        adapter->num_tx_qs = be_num_txqs_want(adapter);
2042        if (adapter->num_tx_qs != MAX_TX_QS) {
2043                rtnl_lock();
2044                netif_set_real_num_tx_queues(adapter->netdev,
2045                        adapter->num_tx_qs);
2046                rtnl_unlock();
2047        }
2048
2049        for_all_tx_queues(adapter, txo, i) {
2050                cq = &txo->cq;
2051                status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2052                                        sizeof(struct be_eth_tx_compl));
2053                if (status)
2054                        return status;
2055
2056                /* If num_evt_qs is less than num_tx_qs, then more than
2057                 * one txq shares an eq
2058                 */
2059                eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2060                status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2061                if (status)
2062                        return status;
2063        }
2064        return 0;
2065}
2066
2067static int be_tx_qs_create(struct be_adapter *adapter)
2068{
2069        struct be_tx_obj *txo;
2070        int i, status;
2071
2072        for_all_tx_queues(adapter, txo, i) {
2073                status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2074                                        sizeof(struct be_eth_wrb));
2075                if (status)
2076                        return status;
2077
2078                status = be_cmd_txq_create(adapter, txo);
2079                if (status)
2080                        return status;
2081        }
2082
2083        dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2084                 adapter->num_tx_qs);
2085        return 0;
2086}
2087
2088static void be_rx_cqs_destroy(struct be_adapter *adapter)
2089{
2090        struct be_queue_info *q;
2091        struct be_rx_obj *rxo;
2092        int i;
2093
2094        for_all_rx_queues(adapter, rxo, i) {
2095                q = &rxo->cq;
2096                if (q->created)
2097                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2098                be_queue_free(adapter, q);
2099        }
2100}
2101
2102static int be_rx_cqs_create(struct be_adapter *adapter)
2103{
2104        struct be_queue_info *eq, *cq;
2105        struct be_rx_obj *rxo;
2106        int rc, i;
2107
2108        /* We'll create as many RSS rings as there are irqs, plus one
2109         * default non-RSS RX queue; with only one irq, RSS rings are of
2110         * no use and only the default RX queue is created */
2111        adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2112                                num_irqs(adapter) + 1 : 1;
2113        if (adapter->num_rx_qs != MAX_RX_QS) {
2114                rtnl_lock();
2115                netif_set_real_num_rx_queues(adapter->netdev,
2116                                             adapter->num_rx_qs);
2117                rtnl_unlock();
2118        }
2119
2120        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2121        for_all_rx_queues(adapter, rxo, i) {
2122                rxo->adapter = adapter;
2123                cq = &rxo->cq;
2124                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2125                                sizeof(struct be_eth_rx_compl));
2126                if (rc)
2127                        return rc;
2128
2129                eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2130                rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2131                if (rc)
2132                        return rc;
2133        }
2134
2135        dev_info(&adapter->pdev->dev,
2136                 "created %d RSS queue(s) and 1 default RX queue\n",
2137                 adapter->num_rx_qs - 1);
2138        return 0;
2139}
2140
2141static irqreturn_t be_intx(int irq, void *dev)
2142{
2143        struct be_eq_obj *eqo = dev;
2144        struct be_adapter *adapter = eqo->adapter;
2145        int num_evts = 0;
2146
2147        /* IRQ is not expected when NAPI is scheduled as the EQ
2148         * will not be armed.
2149         * But, this can happen on Lancer INTx where it takes
2150         * a while to de-assert INTx or in BE2 where occasionally
2151         * an interrupt may be raised even when EQ is unarmed.
2152         * If NAPI is already scheduled, then counting & notifying
2153         * events will orphan them.
2154         */
2155        if (napi_schedule_prep(&eqo->napi)) {
2156                num_evts = events_get(eqo);
2157                __napi_schedule(&eqo->napi);
2158                if (num_evts)
2159                        eqo->spurious_intr = 0;
2160        }
2161        be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2162
2163        /* Return IRQ_HANDLED only for the first spurious intr
2164         * after a valid intr to stop the kernel from branding
2165         * this irq as a bad one!
2166         */
2167        if (num_evts || eqo->spurious_intr++ == 0)
2168                return IRQ_HANDLED;
2169        else
2170                return IRQ_NONE;
2171}
2172
2173static irqreturn_t be_msix(int irq, void *dev)
2174{
2175        struct be_eq_obj *eqo = dev;
2176
2177        be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2178        napi_schedule(&eqo->napi);
2179        return IRQ_HANDLED;
2180}
2181
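    /* GRO is done only for error-free TCP pkts with a valid L4 checksum */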
2182static inline bool do_gro(struct be_rx_compl_info *rxcp)
2183{
2184        return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2185}
2186
2187static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2188                        int budget)
2189{
2190        struct be_adapter *adapter = rxo->adapter;
2191        struct be_queue_info *rx_cq = &rxo->cq;
2192        struct be_rx_compl_info *rxcp;
2193        u32 work_done;
2194
2195        for (work_done = 0; work_done < budget; work_done++) {
2196                rxcp = be_rx_compl_get(rxo);
2197                if (!rxcp)
2198                        break;
2199
2200                /* Is it a flush compl that has no data? */
2201                if (unlikely(rxcp->num_rcvd == 0))
2202                        goto loop_continue;
2203
2204                /* Discard compl with partial DMA Lancer B0 */
2205                if (unlikely(!rxcp->pkt_size)) {
2206                        be_rx_compl_discard(rxo, rxcp);
2207                        goto loop_continue;
2208                }
2209
2210                /* On BE, drop pkts that arrive due to imperfect filtering
2211                 * in promiscuous mode on some SKUs
2212                 */
2213                if (unlikely(rxcp->port != adapter->port_num &&
2214                                !lancer_chip(adapter))) {
2215                        be_rx_compl_discard(rxo, rxcp);
2216                        goto loop_continue;
2217                }
2218
2219                if (do_gro(rxcp))
2220                        be_rx_compl_process_gro(rxo, napi, rxcp);
2221                else
2222                        be_rx_compl_process(rxo, rxcp);
2223loop_continue:
2224                be_rx_stats_update(rxo, rxcp);
2225        }
2226
2227        if (work_done) {
2228                be_cq_notify(adapter, rx_cq->id, true, work_done);
2229
2230                if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2231                        be_post_rx_frags(rxo, GFP_ATOMIC);
2232        }
2233
2234        return work_done;
2235}
2236
2237static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2238                          int budget, int idx)
2239{
2240        struct be_eth_tx_compl *txcp;
2241        int num_wrbs = 0, work_done;
2242
2243        for (work_done = 0; work_done < budget; work_done++) {
2244                txcp = be_tx_compl_get(&txo->cq);
2245                if (!txcp)
2246                        break;
2247                num_wrbs += be_tx_compl_process(adapter, txo,
2248                                AMAP_GET_BITS(struct amap_eth_tx_compl,
2249                                        wrb_index, txcp));
2250        }
2251
2252        if (work_done) {
2253                be_cq_notify(adapter, txo->cq.id, true, work_done);
2254                atomic_sub(num_wrbs, &txo->q.used);
2255
2256                /* As Tx wrbs have been freed up, wake up netdev queue
2257                 * if it was stopped due to lack of tx wrbs.  */
2258                if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2259                        atomic_read(&txo->q.used) < txo->q.len / 2) {
2260                        netif_wake_subqueue(adapter->netdev, idx);
2261                }
2262
2263                u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2264                tx_stats(txo)->tx_compl += work_done;
2265                u64_stats_update_end(&tx_stats(txo)->sync_compl);
2266        }
2267        return (work_done < budget); /* Done */
2268}
2269
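    /* NAPI poll: process TX and RX completions of all queues serviced by
     * this EQ, and MCC completions on the MCC EQ; the EQ is re-armed only
     * when the budget was not exhausted
     */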
2270int be_poll(struct napi_struct *napi, int budget)
2271{
2272        struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2273        struct be_adapter *adapter = eqo->adapter;
2274        int max_work = 0, work, i, num_evts;
2275        bool tx_done;
2276
2277        num_evts = events_get(eqo);
2278
2279        /* Process all TXQs serviced by this EQ */
2280        for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2281                tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2282                                        eqo->tx_budget, i);
2283                if (!tx_done)
2284                        max_work = budget;
2285        }
2286
2287        /* This loop iterates twice for EQ0: completions of the last RXQ
2288         * (the default one) are also processed on it.
2289         * For all other EQs the loop iterates only once.
2290         */
2291        for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2292                work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2293                max_work = max(work, max_work);
2294        }
2295
2296        if (is_mcc_eqo(eqo))
2297                be_process_mcc(adapter);
2298
2299        if (max_work < budget) {
2300                napi_complete(napi);
2301                be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2302        } else {
2303                /* As we'll continue in polling mode, count and clear events */
2304                be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2305        }
2306        return max_work;
2307}
2308
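    /* Check for unrecoverable errors: SLIPORT status registers on Lancer;
     * UE status registers (read via PCI config space and filtered by their
     * masks) on BE
     */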
2309void be_detect_error(struct be_adapter *adapter)
2310{
2311        u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2312        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2313        u32 i;
2314
2315        if (be_hw_error(adapter))
2316                return;
2317
2318        if (lancer_chip(adapter)) {
2319                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2320                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2321                        sliport_err1 = ioread32(adapter->db +
2322                                        SLIPORT_ERROR1_OFFSET);
2323                        sliport_err2 = ioread32(adapter->db +
2324                                        SLIPORT_ERROR2_OFFSET);
2325                }
2326        } else {
2327                pci_read_config_dword(adapter->pdev,
2328                                PCICFG_UE_STATUS_LOW, &ue_lo);
2329                pci_read_config_dword(adapter->pdev,
2330                                PCICFG_UE_STATUS_HIGH, &ue_hi);
2331                pci_read_config_dword(adapter->pdev,
2332                                PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2333                pci_read_config_dword(adapter->pdev,
2334                                PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2335
2336                ue_lo = (ue_lo & ~ue_lo_mask);
2337                ue_hi = (ue_hi & ~ue_hi_mask);
2338        }
2339
2340        /* On certain platforms BE hardware can indicate spurious UEs.
2341         * On a real UE the h/w stops working completely anyway, so
2342         * hw_error is deliberately not set on UE detection.
2343         */
2344        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2345                adapter->hw_error = true;
2346                dev_err(&adapter->pdev->dev,
2347                        "Error detected in the card\n");
2348        }
2349
2350        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2351                dev_err(&adapter->pdev->dev,
2352                        "ERR: sliport status 0x%x\n", sliport_status);
2353                dev_err(&adapter->pdev->dev,
2354                        "ERR: sliport error1 0x%x\n", sliport_err1);
2355                dev_err(&adapter->pdev->dev,
2356                        "ERR: sliport error2 0x%x\n", sliport_err2);
2357        }
2358
2359        if (ue_lo) {
2360                for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2361                        if (ue_lo & 1)
2362                                dev_err(&adapter->pdev->dev,
2363                                "UE: %s bit set\n", ue_status_low_desc[i]);
2364                }
2365        }
2366
2367        if (ue_hi) {
2368                for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2369                        if (ue_hi & 1)
2370                                dev_err(&adapter->pdev->dev,
2371                                "UE: %s bit set\n", ue_status_hi_desc[i]);
2372                }
2373        }
2375}
2376
2377static void be_msix_disable(struct be_adapter *adapter)
2378{
2379        if (msix_enabled(adapter)) {
2380                pci_disable_msix(adapter->pdev);
2381                adapter->num_msix_vec = 0;
2382        }
2383}
2384
2385static uint be_num_rss_want(struct be_adapter *adapter)
2386{
2387        u32 num = 0;
2388
2389        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2390            (lancer_chip(adapter) ||
2391             (!sriov_want(adapter) && be_physfn(adapter)))) {
2392                num = adapter->max_rss_queues;
2393                num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2394        }
2395        return num;
2396}
2397
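    /* Enable as many MSI-x vectors as needed by the RSS rings (and RoCE,
     * when supported); on partial allocation, retry with the vector count
     * that pci_enable_msix() reports as available
     */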
2398static int be_msix_enable(struct be_adapter *adapter)
2399{
2400#define BE_MIN_MSIX_VECTORS             1
2401        int i, status, num_vec, num_roce_vec = 0;
2402        struct device *dev = &adapter->pdev->dev;
2403
2404        /* If RSS queues are not used, need a vec for default RX Q */
2405        num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2406        if (be_roce_supported(adapter)) {
2407                num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2408                                        (num_online_cpus() + 1));
2409                num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2410                num_vec += num_roce_vec;
2411                num_vec = min(num_vec, MAX_MSIX_VECTORS);
2412        }
2413        num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2414
2415        for (i = 0; i < num_vec; i++)
2416                adapter->msix_entries[i].entry = i;
2417
2418        status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2419        if (status == 0) {
2420                goto done;
2421        } else if (status >= BE_MIN_MSIX_VECTORS) {
2422                num_vec = status;
2423                status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2424                                         num_vec);
2425                if (!status)
2426                        goto done;
2427        }
2428
2429        dev_warn(dev, "MSIx enable failed\n");
2430        /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2431        if (!be_physfn(adapter))
2432                return status;
2433        return 0;
2434done:
2435        if (be_roce_supported(adapter)) {
2436                if (num_vec > num_roce_vec) {
2437                        adapter->num_msix_vec = num_vec - num_roce_vec;
2438                        adapter->num_msix_roce_vec =
2439                                num_vec - adapter->num_msix_vec;
2440                } else {
2441                        adapter->num_msix_vec = num_vec;
2442                        adapter->num_msix_roce_vec = 0;
2443                }
2444        } else
2445                adapter->num_msix_vec = num_vec;
2446        dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2447        return 0;
2448}
2449
2450static inline int be_msix_vec_get(struct be_adapter *adapter,
2451                                struct be_eq_obj *eqo)
2452{
2453        return adapter->msix_entries[eqo->idx].vector;
2454}
2455
2456static int be_msix_register(struct be_adapter *adapter)
2457{
2458        struct net_device *netdev = adapter->netdev;
2459        struct be_eq_obj *eqo;
2460        int status, i, vec;
2461
2462        for_all_evt_queues(adapter, eqo, i) {
2463                sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2464                vec = be_msix_vec_get(adapter, eqo);
2465                status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2466                if (status)
2467                        goto err_msix;
2468        }
2469
2470        return 0;
2471err_msix:
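            /* Unwind: free the vectors registered so far, in reverse order */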
2472        for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2473                free_irq(be_msix_vec_get(adapter, eqo), eqo);
2474        dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2475                status);
2476        be_msix_disable(adapter);
2477        return status;
2478}
2479
2480static int be_irq_register(struct be_adapter *adapter)
2481{
2482        struct net_device *netdev = adapter->netdev;
2483        int status;
2484
2485        if (msix_enabled(adapter)) {
2486                status = be_msix_register(adapter);
2487                if (status == 0)
2488                        goto done;
2489                /* INTx is not supported for VF */
2490                if (!be_physfn(adapter))
2491                        return status;
2492        }
2493
2494        /* INTx: only the first EQ is used */
2495        netdev->irq = adapter->pdev->irq;
2496        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2497                             &adapter->eq_obj[0]);
2498        if (status) {
2499                dev_err(&adapter->pdev->dev,
2500                        "INTx request IRQ failed - err %d\n", status);
2501                return status;
2502        }
2503done:
2504        adapter->isr_registered = true;
2505        return 0;
2506}
2507
2508static void be_irq_unregister(struct be_adapter *adapter)
2509{
2510        struct net_device *netdev = adapter->netdev;
2511        struct be_eq_obj *eqo;
2512        int i;
2513
2514        if (!adapter->isr_registered)
2515                return;
2516
2517        /* INTx */
2518        if (!msix_enabled(adapter)) {
2519                free_irq(netdev->irq, &adapter->eq_obj[0]);
2520                goto done;
2521        }
2522
2523        /* MSIx */
2524        for_all_evt_queues(adapter, eqo, i)
2525                free_irq(be_msix_vec_get(adapter, eqo), eqo);
2526
2527done:
2528        adapter->isr_registered = false;
2529}
2530
2531static void be_rx_qs_destroy(struct be_adapter *adapter)
2532{
2533        struct be_queue_info *q;
2534        struct be_rx_obj *rxo;
2535        int i;
2536
2537        for_all_rx_queues(adapter, rxo, i) {
2538                q = &rxo->q;
2539                if (q->created) {
2540                        be_cmd_rxq_destroy(adapter, q);
2541                        be_rx_cq_clean(rxo);
2542                }
2543                be_queue_free(adapter, q);
2544        }
2545}
2546
2547static int be_close(struct net_device *netdev)
2548{
2549        struct be_adapter *adapter = netdev_priv(netdev);
2550        struct be_eq_obj *eqo;
2551        int i;
2552
2553        be_roce_dev_close(adapter);
2554
2555        if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2556                for_all_evt_queues(adapter, eqo, i)
2557                        napi_disable(&eqo->napi);
2558                adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2559        }
2560
2561        be_async_mcc_disable(adapter);
2562
2563        /* Wait for all pending tx completions to arrive so that
2564         * all tx skbs are freed.
2565         */
2566        netif_tx_disable(netdev);
2567        be_tx_compl_clean(adapter);
2568
2569        be_rx_qs_destroy(adapter);
2570
2571        for_all_evt_queues(adapter, eqo, i) {
2572                if (msix_enabled(adapter))
2573                        synchronize_irq(be_msix_vec_get(adapter, eqo));
2574                else
2575                        synchronize_irq(netdev->irq);
2576                be_eq_clean(eqo);
2577        }
2578
2579        be_irq_unregister(adapter);
2580
2581        return 0;
2582}
2583
2584static int be_rx_qs_create(struct be_adapter *adapter)
2585{
2586        struct be_rx_obj *rxo;
2587        int rc, i, j;
2588        u8 rsstable[128];
2589
2590        for_all_rx_queues(adapter, rxo, i) {
2591                rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2592                                    sizeof(struct be_eth_rx_d));
2593                if (rc)
2594                        return rc;
2595        }
2596
2597        /* The FW would like the default RXQ to be created first */
2598        rxo = default_rxo(adapter);
2599        rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2600                               adapter->if_handle, false, &rxo->rss_id);
2601        if (rc)
2602                return rc;
2603
2604        for_all_rss_queues(adapter, rxo, i) {
2605                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2606                                       rx_frag_size, adapter->if_handle,
2607                                       true, &rxo->rss_id);
2608                if (rc)
2609                        return rc;
2610        }
2611
2612        if (be_multi_rxq(adapter)) {
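                    /* Fill the 128-entry RSS indirection table round-robin
                     * with the ids of the RSS rings; the default RXQ is not
                     * included
                     */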
2613                for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2614                        for_all_rss_queues(adapter, rxo, i) {
2615                                if ((j + i) >= 128)
2616                                        break;
2617                                rsstable[j + i] = rxo->rss_id;
2618                        }
2619                }
2620                adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2621                                        RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2622
2623                if (!BEx_chip(adapter))
2624                        adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2625                                                RSS_ENABLE_UDP_IPV6;
2626
2627                rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2628                                       128);
2629                if (rc) {
2630                        adapter->rss_flags = 0;
2631                        return rc;
2632                }
2633        }
2634
2635        /* First time posting */
2636        for_all_rx_queues(adapter, rxo, i)
2637                be_post_rx_frags(rxo, GFP_KERNEL);
2638        return 0;
2639}
2640
2641static int be_open(struct net_device *netdev)
2642{
2643        struct be_adapter *adapter = netdev_priv(netdev);
2644        struct be_eq_obj *eqo;
2645        struct be_rx_obj *rxo;
2646        struct be_tx_obj *txo;
2647        u8 link_status;
2648        int status, i;
2649
2650        status = be_rx_qs_create(adapter);
2651        if (status)
2652                goto err;
2653
2654        status = be_irq_register(adapter);
2655        if (status)
2656                goto err;
2657
2658        for_all_rx_queues(adapter, rxo, i)
2659                be_cq_notify(adapter, rxo->cq.id, true, 0);
2660
2661        for_all_tx_queues(adapter, txo, i)
2662                be_cq_notify(adapter, txo->cq.id, true, 0);
2663
2664        be_async_mcc_enable(adapter);
2665
2666        for_all_evt_queues(adapter, eqo, i) {
2667                napi_enable(&eqo->napi);
2668                be_eq_notify(adapter, eqo->q.id, true, false, 0);
2669        }
2670        adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2671
2672        status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2673        if (!status)
2674                be_link_status_update(adapter, link_status);
2675
2676        netif_tx_start_all_queues(netdev);
2677        be_roce_dev_open(adapter);
2678        return 0;
2679err:
2680        be_close(adapter->netdev);
2681        return -EIO;
2682}
2683
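    /* Program the magic-packet WoL filter and set the PCI PM wake state
     * for D3hot/D3cold accordingly
     */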
2684static int be_setup_wol(struct be_adapter *adapter, bool enable)
2685{
2686        struct be_dma_mem cmd;
2687        int status = 0;
2688        u8 mac[ETH_ALEN];
2689
2690        memset(mac, 0, ETH_ALEN);
2691
2692        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2693        cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2694                                    GFP_KERNEL | __GFP_ZERO);
2695        if (cmd.va == NULL)
2696                return -1;
2697
2698        if (enable) {
2699                status = pci_write_config_dword(adapter->pdev,
2700                        PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2701                if (status) {
2702                        dev_err(&adapter->pdev->dev,
2703                                "Could not enable Wake-on-LAN\n");
2704                        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2705                                          cmd.dma);
2706                        return status;
2707                }
2708                status = be_cmd_enable_magic_wol(adapter,
2709                                adapter->netdev->dev_addr, &cmd);
2710                pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2711                pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2712        } else {
2713                status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2714                pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2715                pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2716        }
2717
2718        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2719        return status;
2720}
2721
2722/*
2723 * Generate a seed MAC address from the PF MAC Address using jhash.
2724 * MAC addresses for VFs are assigned incrementally starting from the seed.
2725 * These addresses are programmed in the ASIC by the PF and the VF driver
2726 * queries for the MAC address during its probe.
2727 */
2728static int be_vf_eth_addr_config(struct be_adapter *adapter)
2729{
2730        u32 vf;
2731        int status = 0;
2732        u8 mac[ETH_ALEN];
2733        struct be_vf_cfg *vf_cfg;
2734
2735        be_vf_eth_addr_generate(adapter, mac);
2736
2737        for_all_vfs(adapter, vf_cfg, vf) {
2738                if (lancer_chip(adapter)) {
2739                        status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2740                } else {
2741                        status = be_cmd_pmac_add(adapter, mac,
2742                                                 vf_cfg->if_handle,
2743                                                 &vf_cfg->pmac_id, vf + 1);
2744                }
2745
2746                if (status)
2747                        dev_err(&adapter->pdev->dev,
2748                        "MAC address assignment failed for VF %d\n", vf);
2749                else
2750                        memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2751
2752                mac[5] += 1;
2753        }
2754        return status;
2755}
2756
2757static int be_vfs_mac_query(struct be_adapter *adapter)
2758{
2759        int status, vf;
2760        u8 mac[ETH_ALEN];
2761        struct be_vf_cfg *vf_cfg;
2762        bool active;
2763
2764        for_all_vfs(adapter, vf_cfg, vf) {
2765                be_cmd_get_mac_from_list(adapter, mac, &active,
2766                                         &vf_cfg->pmac_id, 0);
2767
2768                status = be_cmd_mac_addr_query(adapter, mac, false,
2769                                               vf_cfg->if_handle, 0);
2770                if (status)
2771                        return status;
2772                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2773        }
2774        return 0;
2775}
2776
2777static void be_vf_clear(struct be_adapter *adapter)
2778{
2779        struct be_vf_cfg *vf_cfg;
2780        u32 vf;
2781
2782        if (pci_vfs_assigned(adapter->pdev)) {
2783                dev_warn(&adapter->pdev->dev,
2784                         "VFs are assigned to VMs: not disabling VFs\n");
2785                goto done;
2786        }
2787
2788        pci_disable_sriov(adapter->pdev);
2789
2790        for_all_vfs(adapter, vf_cfg, vf) {
2791                if (lancer_chip(adapter))
2792                        be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2793                else
2794                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2795                                        vf_cfg->pmac_id, vf + 1);
2796
2797                be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2798        }
2799done:
2800        kfree(adapter->vf_cfg);
2801        adapter->num_vfs = 0;
2802}
2803
2804static int be_clear(struct be_adapter *adapter)
2805{
2806        int i = 1;
2807
2808        if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2809                cancel_delayed_work_sync(&adapter->work);
2810                adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2811        }
2812
2813        if (sriov_enabled(adapter))
2814                be_vf_clear(adapter);
2815
2816        for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2817                be_cmd_pmac_del(adapter, adapter->if_handle,
2818                        adapter->pmac_id[i], 0);
2819
2820        be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2821
2822        be_mcc_queues_destroy(adapter);
2823        be_rx_cqs_destroy(adapter);
2824        be_tx_queues_destroy(adapter);
2825        be_evt_queues_destroy(adapter);
2826
2827        kfree(adapter->pmac_id);
2828        adapter->pmac_id = NULL;
2829
2830        be_msix_disable(adapter);
2831        return 0;
2832}
2833
2834static int be_vfs_if_create(struct be_adapter *adapter)
2835{
2836        struct be_vf_cfg *vf_cfg;
2837        u32 cap_flags, en_flags, vf;
2838        int status;
2839
2840        cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2841                    BE_IF_FLAGS_MULTICAST;
2842
2843        for_all_vfs(adapter, vf_cfg, vf) {
2844                if (!BE3_chip(adapter))
2845                        be_cmd_get_profile_config(adapter, &cap_flags,
2846                                                  NULL, vf + 1);
2847
2848                /* If a FW profile exists, then cap_flags are updated */
2849                en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2850                           BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2851                status = be_cmd_if_create(adapter, cap_flags, en_flags,
2852                                          &vf_cfg->if_handle, vf + 1);
2853                if (status)
2854                        goto err;
2855        }
2856err:
2857        return status;
2858}
2859
2860static int be_vf_setup_init(struct be_adapter *adapter)
2861{
2862        struct be_vf_cfg *vf_cfg;
2863        int vf;
2864
2865        adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2866                                  GFP_KERNEL);
2867        if (!adapter->vf_cfg)
2868                return -ENOMEM;
2869
2870        for_all_vfs(adapter, vf_cfg, vf) {
2871                vf_cfg->if_handle = -1;
2872                vf_cfg->pmac_id = -1;
2873        }
2874        return 0;
2875}
2876
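    /* Enable SR-IOV (unless VFs were left enabled by a previous driver
     * load) and configure each VF's interface, MAC address, TX rate and
     * default vlan
     */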
2877static int be_vf_setup(struct be_adapter *adapter)
2878{
2879        struct be_vf_cfg *vf_cfg;
2880        u16 def_vlan, lnk_speed;
2881        int status, old_vfs, vf;
2882        struct device *dev = &adapter->pdev->dev;
2883
2884        old_vfs = pci_num_vf(adapter->pdev);
2885        if (old_vfs) {
2886                dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2887                if (old_vfs != num_vfs)
2888                        dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2889                adapter->num_vfs = old_vfs;
2890        } else {
2891                if (num_vfs > adapter->dev_num_vfs)
2892                        dev_info(dev, "Device supports only %d VFs, not the requested %d\n",
2893                                 adapter->dev_num_vfs, num_vfs);
2894                adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2895                if (!adapter->num_vfs)
2896                        return 0;
2897        }
2898
2899        status = be_vf_setup_init(adapter);
2900        if (status)
2901                goto err;
2902
2903        if (old_vfs) {
2904                for_all_vfs(adapter, vf_cfg, vf) {
2905                        status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2906                        if (status)
2907                                goto err;
2908                }
2909        } else {
2910                status = be_vfs_if_create(adapter);
2911                if (status)
2912                        goto err;
2913        }
2914
2915        if (old_vfs) {
2916                status = be_vfs_mac_query(adapter);
2917                if (status)
2918                        goto err;
2919        } else {
2920                status = be_vf_eth_addr_config(adapter);
2921                if (status)
2922                        goto err;
2923        }
2924
2925        for_all_vfs(adapter, vf_cfg, vf) {
2926                /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2927                 * Allow full available bandwidth
2928                 */
2929                if (BE3_chip(adapter) && !old_vfs)
2930                        be_cmd_set_qos(adapter, 1000, vf + 1);
2931
2932                status = be_cmd_link_status_query(adapter, &lnk_speed,
2933                                                  NULL, vf + 1);
2934                if (!status)
2935                        vf_cfg->tx_rate = lnk_speed;
2936
2937                status = be_cmd_get_hsw_config(adapter, &def_vlan,
2938                                               vf + 1, vf_cfg->if_handle);
2939                if (status)
2940                        goto err;
2941                vf_cfg->def_vid = def_vlan;
2942
2943                be_cmd_enable_vf(adapter, vf + 1);
2944        }
2945
2946        if (!old_vfs) {
2947                status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2948                if (status) {
2949                        dev_err(dev, "SRIOV enable failed\n");
2950                        adapter->num_vfs = 0;
2951                        goto err;
2952                }
2953        }
2954        return 0;
2955err:
2956        dev_err(dev, "VF setup failed\n");
2957        be_vf_clear(adapter);
2958        return status;
2959}
2960
2961static void be_setup_init(struct be_adapter *adapter)
2962{
2963        adapter->vlan_prio_bmap = 0xff;
2964        adapter->phy.link_speed = -1;
2965        adapter->if_handle = -1;
2966        adapter->be3_native = false;
2967        adapter->promiscuous = false;
2968        if (be_physfn(adapter))
2969                adapter->cmd_privileges = MAX_PRIVILEGES;
2970        else
2971                adapter->cmd_privileges = MIN_PRIVILEGES;
2972}
2973
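/* Select the MAC for the interface: use the permanent MAC if one is already
 * set; else query it from FW -- the mac-list on Lancer, the permanent MAC
 * for a BE3 PF, or the PF-provisioned MAC for a BE3 VF.
 */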
2974static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2975                           bool *active_mac, u32 *pmac_id)
2976{
2977        int status = 0;
2978
2979        if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2980                memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2981                if (!lancer_chip(adapter) && !be_physfn(adapter))
2982                        *active_mac = true;
2983                else
2984                        *active_mac = false;
2985
2986                return status;
2987        }
2988
2989        if (lancer_chip(adapter)) {
2990                status = be_cmd_get_mac_from_list(adapter, mac,
2991                                                  active_mac, pmac_id, 0);
2992                if (*active_mac) {
2993                        status = be_cmd_mac_addr_query(adapter, mac, false,
2994                                                       if_handle, *pmac_id);
2995                }
2996        } else if (be_physfn(adapter)) {
2997                /* For BE3, for PF get permanent MAC */
2998                status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2999                *active_mac = false;
3000        } else {
3001                /* For BE3, for VF get soft MAC assigned by PF*/
3002                status = be_cmd_mac_addr_query(adapter, mac, false,
3003                                               if_handle, 0);
3004                *active_mac = true;
3005        }
3006        return status;
3007}
3008
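/* Discover per-function resource limits: from the FW profile on chips that
 * provide one (non-BEx), else from driver defaults; also read the SR-IOV
 * TotalVFs value from PCI config space.
 */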
3009static void be_get_resources(struct be_adapter *adapter)
3010{
3011        u16 dev_num_vfs;
3012        int pos, status;
3013        bool profile_present = false;
3014        u16 txq_count = 0;
3015
3016        if (!BEx_chip(adapter)) {
3017                status = be_cmd_get_func_config(adapter);
3018                if (!status)
3019                        profile_present = true;
3020        } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3021                be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3022        }
3023
3024        if (profile_present) {
3025                /* Clamp FW-reported limits (e.g. on Lancer) to driver maximums */
3026                adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3027                                              BE_UC_PMAC_COUNT);
3028                adapter->max_vlans = min_t(u16, adapter->max_vlans,
3029                                           BE_NUM_VLANS_SUPPORTED);
3030                adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3031                                               BE_MAX_MC);
3032                adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3033                                               MAX_TX_QS);
3034                adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3035                                                BE3_MAX_RSS_QS);
3036                adapter->max_event_queues = min_t(u16,
3037                                                  adapter->max_event_queues,
3038                                                  BE3_MAX_RSS_QS);
3039
3040                if (adapter->max_rss_queues &&
3041                    adapter->max_rss_queues == adapter->max_rx_queues)
3042                        adapter->max_rss_queues -= 1;
3043
3044                if (adapter->max_event_queues < adapter->max_rss_queues)
3045                        adapter->max_rss_queues = adapter->max_event_queues;
3046
3047        } else {
3048                if (be_physfn(adapter))
3049                        adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3050                else
3051                        adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3052
3053                if (adapter->function_mode & FLEX10_MODE)
3054                        adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3055                else
3056                        adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3057
3058                adapter->max_mcast_mac = BE_MAX_MC;
3059                adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3060                adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3061                                               MAX_TX_QS);
3062                adapter->max_rss_queues = (adapter->be3_native) ?
3063                                           BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3064                adapter->max_event_queues = BE3_MAX_RSS_QS;
3065
3066                adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3067                                        BE_IF_FLAGS_BROADCAST |
3068                                        BE_IF_FLAGS_MULTICAST |
3069                                        BE_IF_FLAGS_PASS_L3L4_ERRORS |
3070                                        BE_IF_FLAGS_MCAST_PROMISCUOUS |
3071                                        BE_IF_FLAGS_VLAN_PROMISCUOUS |
3072                                        BE_IF_FLAGS_PROMISCUOUS;
3073
3074                if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3075                        adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3076        }
3077
3078        pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3079        if (pos) {
3080                pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3081                                     &dev_num_vfs);
3082                if (BE3_chip(adapter))
3083                        dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3084                adapter->dev_num_vfs = dev_num_vfs;
3085        }
3086}
3087
3088/* Routine to query per function resource limits */
3089static int be_get_config(struct be_adapter *adapter)
3090{
3091        int status;
3092
3093        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3094                                     &adapter->function_mode,
3095                                     &adapter->function_caps,
3096                                     &adapter->asic_rev);
3097        if (status)
3098                goto err;
3099
3100        be_get_resources(adapter);
3101
3102        /* primary mac needs 1 pmac entry */
3103        adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3104                                   sizeof(u32), GFP_KERNEL);
3105        if (!adapter->pmac_id) {
3106                status = -ENOMEM;
3107                goto err;
3108        }
3109
3110err:
3111        return status;
3112}
3113
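/* Bring the adapter to an operational state: query FW config, enable MSI-x,
 * create event/completion/MCC/TX/RX queues and the interface, program the
 * MAC, apply VLAN/RX-mode/flow-control settings, set up VFs and start the
 * worker.
 */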
3114static int be_setup(struct be_adapter *adapter)
3115{
3116        struct device *dev = &adapter->pdev->dev;
3117        u32 en_flags;
3118        u32 tx_fc, rx_fc;
3119        int status;
3120        u8 mac[ETH_ALEN];
3121        bool active_mac;
3122
3123        be_setup_init(adapter);
3124
3125        if (!lancer_chip(adapter))
3126                be_cmd_req_native_mode(adapter);
3127
3128        status = be_get_config(adapter);
3129        if (status)
3130                goto err;
3131
3132        status = be_msix_enable(adapter);
3133        if (status)
3134                goto err;
3135
3136        status = be_evt_queues_create(adapter);
3137        if (status)
3138                goto err;
3139
3140        status = be_tx_cqs_create(adapter);
3141        if (status)
3142                goto err;
3143
3144        status = be_rx_cqs_create(adapter);
3145        if (status)
3146                goto err;
3147
3148        status = be_mcc_queues_create(adapter);
3149        if (status)
3150                goto err;
3151
3152        be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3153        /* In UMC mode FW does not return the right privileges.
3154         * Override with privileges equivalent to those of a PF.
3155         */
3156        if (be_is_mc(adapter))
3157                adapter->cmd_privileges = MAX_PRIVILEGES;
3158
3159        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3160                        BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3161
3162        if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3163                en_flags |= BE_IF_FLAGS_RSS;
3164
3165        en_flags &= adapter->if_cap_flags;
3166
3167        status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3168                                  &adapter->if_handle, 0);
3169        if (status != 0)
3170                goto err;
3171
3172        memset(mac, 0, ETH_ALEN);
3173        active_mac = false;
3174        status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3175                                 &active_mac, &adapter->pmac_id[0]);
3176        if (status != 0)
3177                goto err;
3178
3179        if (!active_mac) {
3180                status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3181                                         &adapter->pmac_id[0], 0);
3182                if (status != 0)
3183                        goto err;
3184        }
3185
3186        if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3187                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3188                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3189        }
3190
3191        status = be_tx_qs_create(adapter);
3192        if (status)
3193                goto err;
3194
3195        be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3196
3197        if (adapter->vlans_added)
3198                be_vid_config(adapter);
3199
3200        be_set_rx_mode(adapter->netdev);
3201
3202        be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3203
3204        if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3205                be_cmd_set_flow_control(adapter, adapter->tx_fc,
3206                                        adapter->rx_fc);
3207
3208        if (be_physfn(adapter)) {
3209                if (adapter->dev_num_vfs)
3210                        be_vf_setup(adapter);
3211                else
3212                        dev_warn(dev, "device doesn't support SRIOV\n");
3213        }
3214
3215        status = be_cmd_get_phy_info(adapter);
3216        if (!status && be_pause_supported(adapter))
3217                adapter->phy.fc_autoneg = 1;
3218
3219        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3220        adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3221        return 0;
3222err:
3223        be_clear(adapter);
3224        return status;
3225}
3226
3227#ifdef CONFIG_NET_POLL_CONTROLLER
3228static void be_netpoll(struct net_device *netdev)
3229{
3230        struct be_adapter *adapter = netdev_priv(netdev);
3231        struct be_eq_obj *eqo;
3232        int i;
3233
3234        for_all_evt_queues(adapter, eqo, i) {
3235                be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3236                napi_schedule(&eqo->napi);
3237        }
3240}
3241#endif
3242
3243#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3244static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3245
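/* Decide whether the boot code (redboot) section needs flashing by comparing
 * the CRC of the image within the UFI file against the CRC already in flash.
 */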
3246static bool be_flash_redboot(struct be_adapter *adapter,
3247                        const u8 *p, u32 img_start, int image_size,
3248                        int hdr_size)
3249{
3250        u32 crc_offset;
3251        u8 flashed_crc[4];
3252        int status;
3253
3254        crc_offset = hdr_size + img_start + image_size - 4;
3255
3256        p += crc_offset;
3257
3258        status = be_cmd_get_flash_crc(adapter, flashed_crc,
3259                                      image_size - 4);
3260        if (status) {
3261                dev_err(&adapter->pdev->dev,
3262                        "could not get CRC from flash; not flashing redboot\n");
3263                return false;
3264        }
3265
3266        /* update redboot only if the CRC does not match */
3267        return memcmp(flashed_crc, p, 4) != 0;
3271}
3272
3273static bool phy_flashing_required(struct be_adapter *adapter)
3274{
3275        return (adapter->phy.phy_type == TN_8022 &&
3276                adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3277}
3278
3279static bool is_comp_in_ufi(struct be_adapter *adapter,
3280                           struct flash_section_info *fsec, int type)
3281{
3282        int i = 0, img_type = 0;
3283        struct flash_section_info_g2 *fsec_g2 = NULL;
3284
3285        if (BE2_chip(adapter))
3286                fsec_g2 = (struct flash_section_info_g2 *)fsec;
3287
3288        for (i = 0; i < MAX_FLASH_COMP; i++) {
3289                if (fsec_g2)
3290                        img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3291                else
3292                        img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3293
3294                if (img_type == type)
3295                        return true;
3296        }
3297        return false;
3299}
3300
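/* Scan the UFI file past its header for the flash section info block,
 * identified by the flash_cookie signature.
 */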
3301static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3302                                                int header_size,
3303                                                const struct firmware *fw)
3304{
3305        struct flash_section_info *fsec = NULL;
3306        const u8 *p = fw->data;
3307
3308        p += header_size;
3309        while (p < (fw->data + fw->size)) {
3310                fsec = (struct flash_section_info *)p;
3311                if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3312                        return fsec;
3313                p += 32;
3314        }
3315        return NULL;
3316}
3317
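/* Write one image to the flashrom in 32KB chunks; all chunks except the
 * last are issued with a SAVE op, the last with a FLASH op that commits
 * the image (PHY FW uses the corresponding PHY_SAVE/PHY_FLASH ops).
 */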
3318static int be_flash(struct be_adapter *adapter, const u8 *img,
3319                struct be_dma_mem *flash_cmd, int optype, int img_size)
3320{
3321        u32 total_bytes = 0, flash_op, num_bytes = 0;
3322        int status = 0;
3323        struct be_cmd_write_flashrom *req = flash_cmd->va;
3324
3325        total_bytes = img_size;
3326        while (total_bytes) {
3327                num_bytes = min_t(u32, 32*1024, total_bytes);
3328
3329                total_bytes -= num_bytes;
3330
3331                if (!total_bytes) {
3332                        if (optype == OPTYPE_PHY_FW)
3333                                flash_op = FLASHROM_OPER_PHY_FLASH;
3334                        else
3335                                flash_op = FLASHROM_OPER_FLASH;
3336                } else {
3337                        if (optype == OPTYPE_PHY_FW)
3338                                flash_op = FLASHROM_OPER_PHY_SAVE;
3339                        else
3340                                flash_op = FLASHROM_OPER_SAVE;
3341                }
3342
3343                memcpy(req->data_buf, img, num_bytes);
3344                img += num_bytes;
3345                status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3346                                                flash_op, num_bytes);
3347                if (status) {
3348                        if (status == ILLEGAL_IOCTL_REQ &&
3349                            optype == OPTYPE_PHY_FW)
3350                                break;
3351                        dev_err(&adapter->pdev->dev,
3352                                "cmd to write to flash rom failed.\n");
3353                        return status;
3354                }
3355        }
3356        return 0;
3357}
3358
3359/* For BE2, BE3 and BE3-R */
3360static int be_flash_BEx(struct be_adapter *adapter,
3361                         const struct firmware *fw,
3362                         struct be_dma_mem *flash_cmd,
3363                         int num_of_images)
3365{
3366        int status = 0, i, filehdr_size = 0;
3367        int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3368        const u8 *p = fw->data;
3369        const struct flash_comp *pflashcomp;
3370        int num_comp, redboot;
3371        struct flash_section_info *fsec = NULL;
3372
3373        struct flash_comp gen3_flash_types[] = {
3374                { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3375                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3376                { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3377                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3378                { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3379                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3380                { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3381                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3382                { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3383                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3384                { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3385                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3386                { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3387                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3388                { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3389                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3390                { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3391                        FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3392                { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3393                        FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3394        };
3395
3396        struct flash_comp gen2_flash_types[] = {
3397                { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3398                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3399                { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3400                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3401                { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3402                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3403                { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3404                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3405                { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3406                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3407                { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3408                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3409                { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3410                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3411                { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3412                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3413        };
3414
3415        if (BE3_chip(adapter)) {
3416                pflashcomp = gen3_flash_types;
3417                filehdr_size = sizeof(struct flash_file_hdr_g3);
3418                num_comp = ARRAY_SIZE(gen3_flash_types);
3419        } else {
3420                pflashcomp = gen2_flash_types;
3421                filehdr_size = sizeof(struct flash_file_hdr_g2);
3422                num_comp = ARRAY_SIZE(gen2_flash_types);
3423        }
3424
3425        /* Get flash section info */
3426        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3427        if (!fsec) {
3428                dev_err(&adapter->pdev->dev,
3429                        "Invalid Cookie. UFI corrupted?\n");
3430                return -1;
3431        }
3432        for (i = 0; i < num_comp; i++) {
3433                if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3434                        continue;
3435
3436                if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3437                    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3438                        continue;
3439
3440                if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3441                    !phy_flashing_required(adapter))
3442                        continue;
3443
3444                if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3445                        redboot = be_flash_redboot(adapter, fw->data,
3446                                pflashcomp[i].offset, pflashcomp[i].size,
3447                                filehdr_size + img_hdrs_size);
3448                        if (!redboot)
3449                                continue;
3450                }
3451
3452                p = fw->data;
3453                p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3454                if (p + pflashcomp[i].size > fw->data + fw->size)
3455                        return -1;
3456
3457                status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3458                                        pflashcomp[i].size);
3459                if (status) {
3460                        dev_err(&adapter->pdev->dev,
3461                                "Flashing section type %d failed.\n",
3462                                pflashcomp[i].img_type);
3463                        return status;
3464                }
3465        }
3466        return 0;
3467}
3468
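/* Flash a Skyhawk UFI: walk the flash section entries, map each image type
 * to its flashrom optype and flash the matching sections.
 */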
3469static int be_flash_skyhawk(struct be_adapter *adapter,
3470                const struct firmware *fw,
3471                struct be_dma_mem *flash_cmd, int num_of_images)
3472{
3473        int status = 0, i, filehdr_size = 0;
3474        int img_offset, img_size, img_optype, redboot;
3475        int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3476        const u8 *p = fw->data;
3477        struct flash_section_info *fsec = NULL;
3478
3479        filehdr_size = sizeof(struct flash_file_hdr_g3);
3480        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3481        if (!fsec) {
3482                dev_err(&adapter->pdev->dev,
3483                        "Invalid Cookie. UFI corrupted?\n");
3484                return -1;
3485        }
3486
3487        for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3488                img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3489                img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3490
3491                switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3492                case IMAGE_FIRMWARE_iSCSI:
3493                        img_optype = OPTYPE_ISCSI_ACTIVE;
3494                        break;
3495                case IMAGE_BOOT_CODE:
3496                        img_optype = OPTYPE_REDBOOT;
3497                        break;
3498                case IMAGE_OPTION_ROM_ISCSI:
3499                        img_optype = OPTYPE_BIOS;
3500                        break;
3501                case IMAGE_OPTION_ROM_PXE:
3502                        img_optype = OPTYPE_PXE_BIOS;
3503                        break;
3504                case IMAGE_OPTION_ROM_FCoE:
3505                        img_optype = OPTYPE_FCOE_BIOS;
3506                        break;
3507                case IMAGE_FIRMWARE_BACKUP_iSCSI:
3508                        img_optype = OPTYPE_ISCSI_BACKUP;
3509                        break;
3510                case IMAGE_NCSI:
3511                        img_optype = OPTYPE_NCSI_FW;
3512                        break;
3513                default:
3514                        continue;
3515                }
3516
3517                if (img_optype == OPTYPE_REDBOOT) {
3518                        redboot = be_flash_redboot(adapter, fw->data,
3519                                        img_offset, img_size,
3520                                        filehdr_size + img_hdrs_size);
3521                        if (!redboot)
3522                                continue;
3523                }
3524
3525                p = fw->data;
3526                p += filehdr_size + img_offset + img_hdrs_size;
3527                if (p + img_size > fw->data + fw->size)
3528                        return -1;
3529
3530                status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3531                if (status) {
3532                        dev_err(&adapter->pdev->dev,
3533                                "Flashing section type %d failed.\n",
3534                                le32_to_cpu(fsec->fsec_entry[i].type));
3535                        return status;
3536                }
3537        }
3538        return 0;
3539}
3540
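/* Download FW to a Lancer chip: stream the image in 32KB chunks to the
 * "/prg" object, issue a zero-length write to commit it, and trigger a FW
 * reset if the new image requires one to become active.
 */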
3541static int lancer_fw_download(struct be_adapter *adapter,
3542                                const struct firmware *fw)
3543{
3544#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3545#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3546        struct be_dma_mem flash_cmd;
3547        const u8 *data_ptr = NULL;
3548        u8 *dest_image_ptr = NULL;
3549        size_t image_size = 0;
3550        u32 chunk_size = 0;
3551        u32 data_written = 0;
3552        u32 offset = 0;
3553        int status = 0;
3554        u8 add_status = 0;
3555        u8 change_status;
3556
3557        if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3558                dev_err(&adapter->pdev->dev,
3559                        "FW image is not properly aligned; length must be a multiple of 4 bytes\n");
3561                status = -EINVAL;
3562                goto lancer_fw_exit;
3563        }
3564
3565        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3566                                + LANCER_FW_DOWNLOAD_CHUNK;
3567        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3568                                          &flash_cmd.dma, GFP_KERNEL);
3569        if (!flash_cmd.va) {
3570                status = -ENOMEM;
3571                goto lancer_fw_exit;
3572        }
3573
3574        dest_image_ptr = flash_cmd.va +
3575                                sizeof(struct lancer_cmd_req_write_object);
3576        image_size = fw->size;
3577        data_ptr = fw->data;
3578
3579        while (image_size) {
3580                chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3581
3582                /* Copy the image chunk content. */
3583                memcpy(dest_image_ptr, data_ptr, chunk_size);
3584
3585                status = lancer_cmd_write_object(adapter, &flash_cmd,
3586                                                 chunk_size, offset,
3587                                                 LANCER_FW_DOWNLOAD_LOCATION,
3588                                                 &data_written, &change_status,
3589                                                 &add_status);
3590                if (status)
3591                        break;
3592
3593                offset += data_written;
3594                data_ptr += data_written;
3595                image_size -= data_written;
3596        }
3597
3598        if (!status) {
3599                /* Commit the FW written */
3600                status = lancer_cmd_write_object(adapter, &flash_cmd,
3601                                                 0, offset,
3602                                                 LANCER_FW_DOWNLOAD_LOCATION,
3603                                                 &data_written, &change_status,
3604                                                 &add_status);
3605        }
3606
3607        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3608                                flash_cmd.dma);
3609        if (status) {
3610                dev_err(&adapter->pdev->dev,
3611                        "Firmware load error. Status code: 0x%x, Additional status: 0x%x\n",
3612                        status, add_status);
3614                goto lancer_fw_exit;
3615        }
3616
3617        if (change_status == LANCER_FW_RESET_NEEDED) {
3618                status = lancer_physdev_ctrl(adapter,
3619                                             PHYSDEV_CONTROL_FW_RESET_MASK);
3620                if (status) {
3621                        dev_err(&adapter->pdev->dev,
3622                                "Adapter busy for FW reset; new FW will not be active\n");
3624                        goto lancer_fw_exit;
3625                }
3626        } else if (change_status != LANCER_NO_RESET_NEEDED) {
3627                dev_err(&adapter->pdev->dev,
3628                        "System reboot required for new FW to be active\n");
3630        }
3631
3632        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3633lancer_fw_exit:
3634        return status;
3635}
3636
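/* UFI file flavors, derived from the file header: build '2' => BE2,
 * build '3' => BE3 (or BE3-R when asic_type_rev is 0x10), build '4' =>
 * Skyhawk.
 */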
3637#define UFI_TYPE2               2
3638#define UFI_TYPE3               3
3639#define UFI_TYPE3R              10
3640#define UFI_TYPE4               4
3641static int be_get_ufi_type(struct be_adapter *adapter,
3642                           struct flash_file_hdr_g3 *fhdr)
3643{
3644        if (fhdr == NULL)
3645                goto be_get_ufi_exit;
3646
3647        if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3648                return UFI_TYPE4;
3649        else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3650                if (fhdr->asic_type_rev == 0x10)
3651                        return UFI_TYPE3R;
3652                else
3653                        return UFI_TYPE3;
3654        } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3655                return UFI_TYPE2;
3656
3657be_get_ufi_exit:
3658        dev_err(&adapter->pdev->dev,
3659                "UFI and Interface are not compatible for flashing\n");
3660        return -1;
3661}
3662
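/* Flash a BEx/Skyhawk UFI: check that the UFI flavor matches the chip and
 * flash every image whose header carries imageid 1.
 */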
3663static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3664{
3665        struct flash_file_hdr_g3 *fhdr3;
3666        struct image_hdr *img_hdr_ptr = NULL;
3667        struct be_dma_mem flash_cmd;
3668        const u8 *p;
3669        int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3670
3671        flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3672        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3673                                          &flash_cmd.dma, GFP_KERNEL);
3674        if (!flash_cmd.va) {
3675                status = -ENOMEM;
3676                goto be_fw_exit;
3677        }
3678
3679        p = fw->data;
3680        fhdr3 = (struct flash_file_hdr_g3 *)p;
3681
3682        ufi_type = be_get_ufi_type(adapter, fhdr3);
3683
3684        num_imgs = le32_to_cpu(fhdr3->num_imgs);
3685        for (i = 0; i < num_imgs; i++) {
3686                img_hdr_ptr = (struct image_hdr *)(fw->data +
3687                                (sizeof(struct flash_file_hdr_g3) +
3688                                 i * sizeof(struct image_hdr)));
3689                if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3690                        switch (ufi_type) {
3691                        case UFI_TYPE4:
3692                                status = be_flash_skyhawk(adapter, fw,
3693                                                        &flash_cmd, num_imgs);
3694                                break;
3695                        case UFI_TYPE3R:
3696                                status = be_flash_BEx(adapter, fw, &flash_cmd,
3697                                                      num_imgs);
3698                                break;
3699                        case UFI_TYPE3:
3700                                /* Do not flash this ufi on BE3-R cards */
3701                                if (adapter->asic_rev < 0x10)
3702                                        status = be_flash_BEx(adapter, fw,
3703                                                              &flash_cmd,
3704                                                              num_imgs);
3705                                else {
3706                                        status = -1;
3707                                        dev_err(&adapter->pdev->dev,
3708                                                "Can't load BE3 UFI on BE3R\n");
3709                                }
3710                        }
3711                }
3712        }
3713
3714        if (ufi_type == UFI_TYPE2)
3715                status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3716        else if (ufi_type == -1)
3717                status = -1;
3718
3719        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3720                          flash_cmd.dma);
3721        if (status) {
3722                dev_err(&adapter->pdev->dev, "Firmware load error\n");
3723                goto be_fw_exit;
3724        }
3725
3726        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3727
3728be_fw_exit:
3729        return status;
3730}
3731
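/* Entry point for flashing FW from user space (e.g. ethtool -f): fetch the
 * image with request_firmware() and hand it to the chip-specific download
 * routine.
 */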
3732int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3733{
3734        const struct firmware *fw;
3735        int status;
3736
3737        if (!netif_running(adapter->netdev)) {
3738                dev_err(&adapter->pdev->dev,
3739                        "Firmware load not allowed (interface is down)\n");
3740                return -ENETDOWN;
3741        }
3742
3743        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3744        if (status)
3745                goto fw_exit;
3746
3747        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3748
3749        if (lancer_chip(adapter))
3750                status = lancer_fw_download(adapter, fw);
3751        else
3752                status = be_fw_download(adapter, fw);
3753
3754        if (!status)
3755                be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3756                                  adapter->fw_on_flash);
3757
3758fw_exit:
3759        release_firmware(fw);
3760        return status;
3761}
3762
3763static const struct net_device_ops be_netdev_ops = {
3764        .ndo_open               = be_open,
3765        .ndo_stop               = be_close,
3766        .ndo_start_xmit         = be_xmit,
3767        .ndo_set_rx_mode        = be_set_rx_mode,
3768        .ndo_set_mac_address    = be_mac_addr_set,
3769        .ndo_change_mtu         = be_change_mtu,
3770        .ndo_get_stats64        = be_get_stats64,
3771        .ndo_validate_addr      = eth_validate_addr,
3772        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3773        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3774        .ndo_set_vf_mac         = be_set_vf_mac,
3775        .ndo_set_vf_vlan        = be_set_vf_vlan,
3776        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3777        .ndo_get_vf_config      = be_get_vf_config,
3778#ifdef CONFIG_NET_POLL_CONTROLLER
3779        .ndo_poll_controller    = be_netpoll,
3780#endif
3781};
3782
3783static void be_netdev_init(struct net_device *netdev)
3784{
3785        struct be_adapter *adapter = netdev_priv(netdev);
3786        struct be_eq_obj *eqo;
3787        int i;
3788
3789        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3790                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3791                NETIF_F_HW_VLAN_CTAG_TX;
3792        if (be_multi_rxq(adapter))
3793                netdev->hw_features |= NETIF_F_RXHASH;
3794
3795        netdev->features |= netdev->hw_features |
3796                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3797
3798        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3799                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3800
3801        netdev->priv_flags |= IFF_UNICAST_FLT;
3802
3803        netdev->flags |= IFF_MULTICAST;
3804
3805        netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3806
3807        netdev->netdev_ops = &be_netdev_ops;
3808
3809        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3810
3811        for_all_evt_queues(adapter, eqo, i)
3812                netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3813}
3814
3815static void be_unmap_pci_bars(struct be_adapter *adapter)
3816{
3817        if (adapter->csr)
3818                pci_iounmap(adapter->pdev, adapter->csr);
3819        if (adapter->db)
3820                pci_iounmap(adapter->pdev, adapter->db);
3821}
3822
3823static int db_bar(struct be_adapter *adapter)
3824{
3825        if (lancer_chip(adapter) || !be_physfn(adapter))
3826                return 0;
3827        else
3828                return 4;
3829}
3830
3831static int be_roce_map_pci_bars(struct be_adapter *adapter)
3832{
3833        if (skyhawk_chip(adapter)) {
3834                adapter->roce_db.size = 4096;
3835                adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3836                                                              db_bar(adapter));
3837                adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3838                                                               db_bar(adapter));
3839        }
3840        return 0;
3841}
3842
3843static int be_map_pci_bars(struct be_adapter *adapter)
3844{
3845        u8 __iomem *addr;
3846        u32 sli_intf;
3847
3848        pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3849        adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3850                                SLI_INTF_IF_TYPE_SHIFT;
3851
3852        if (BEx_chip(adapter) && be_physfn(adapter)) {
3853                adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3854                if (adapter->csr == NULL)
3855                        return -ENOMEM;
3856        }
3857
3858        addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3859        if (addr == NULL)
3860                goto pci_map_err;
3861        adapter->db = addr;
3862
3863        be_roce_map_pci_bars(adapter);
3864        return 0;
3865
3866pci_map_err:
3867        be_unmap_pci_bars(adapter);
3868        return -ENOMEM;
3869}
3870
3871static void be_ctrl_cleanup(struct be_adapter *adapter)
3872{
3873        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3874
3875        be_unmap_pci_bars(adapter);
3876
3877        if (mem->va)
3878                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3879                                  mem->dma);
3880
3881        mem = &adapter->rx_filter;
3882        if (mem->va)
3883                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3884                                  mem->dma);
3885}
3886
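/* Map the PCI BARs and allocate the DMA buffers used to post FW cmds: the
 * mailbox (16-byte aligned within its allocation) and the rx-filter cmd
 * buffer.
 */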
3887static int be_ctrl_init(struct be_adapter *adapter)
3888{
3889        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3890        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3891        struct be_dma_mem *rx_filter = &adapter->rx_filter;
3892        u32 sli_intf;
3893        int status;
3894
3895        pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3896        adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3897                                 SLI_INTF_FAMILY_SHIFT;
3898        adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3899
3900        status = be_map_pci_bars(adapter);
3901        if (status)
3902                goto done;
3903
3904        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3905        mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3906                                                mbox_mem_alloc->size,
3907                                                &mbox_mem_alloc->dma,
3908                                                GFP_KERNEL);
3909        if (!mbox_mem_alloc->va) {
3910                status = -ENOMEM;
3911                goto unmap_pci_bars;
3912        }
3913        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3914        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3915        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3916        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3917
3918        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3919        rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3920                                           &rx_filter->dma,
3921                                           GFP_KERNEL | __GFP_ZERO);
3922        if (rx_filter->va == NULL) {
3923                status = -ENOMEM;
3924                goto free_mbox;
3925        }
3926
3927        mutex_init(&adapter->mbox_lock);
3928        spin_lock_init(&adapter->mcc_lock);
3929        spin_lock_init(&adapter->mcc_cq_lock);
3930
3931        init_completion(&adapter->flash_compl);
3932        pci_save_state(adapter->pdev);
3933        return 0;
3934
3935free_mbox:
3936        dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3937                          mbox_mem_alloc->va, mbox_mem_alloc->dma);
3938
3939unmap_pci_bars:
3940        be_unmap_pci_bars(adapter);
3941
3942done:
3943        return status;
3944}
3945
3946static void be_stats_cleanup(struct be_adapter *adapter)
3947{
3948        struct be_dma_mem *cmd = &adapter->stats_cmd;
3949
3950        if (cmd->va)
3951                dma_free_coherent(&adapter->pdev->dev, cmd->size,
3952                                  cmd->va, cmd->dma);
3953}
3954
3955static int be_stats_init(struct be_adapter *adapter)
3956{
3957        struct be_dma_mem *cmd = &adapter->stats_cmd;
3958
3959        if (lancer_chip(adapter))
3960                cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3961        else if (BE2_chip(adapter))
3962                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3963        else
3964                /* BE3 and Skyhawk */
3965                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3966
3967        cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3968                                     GFP_KERNEL | __GFP_ZERO);
3969        if (cmd->va == NULL)
3970                return -ENOMEM;
3971        return 0;
3972}
3973
3974static void be_remove(struct pci_dev *pdev)
3975{
3976        struct be_adapter *adapter = pci_get_drvdata(pdev);
3977
3978        if (!adapter)
3979                return;
3980
3981        be_roce_dev_remove(adapter);
3982        be_intr_set(adapter, false);
3983
3984        cancel_delayed_work_sync(&adapter->func_recovery_work);
3985
3986        unregister_netdev(adapter->netdev);
3987
3988        be_clear(adapter);
3989
3990        /* tell fw we're done with firing cmds */
3991        be_cmd_fw_clean(adapter);
3992
3993        be_stats_cleanup(adapter);
3994
3995        be_ctrl_cleanup(adapter);
3996
3997        pci_disable_pcie_error_reporting(pdev);
3998
3999        pci_set_drvdata(pdev, NULL);
4000        pci_release_regions(pdev);
4001        pci_disable_device(pdev);
4002
4003        free_netdev(adapter->netdev);
4004}
4005
4006bool be_is_wol_supported(struct be_adapter *adapter)
4007{
4008        return (adapter->wol_cap & BE_WOL_CAP) &&
4009               !be_is_wol_excluded(adapter);
4010}
4011
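/* Query the FW's extended FAT capabilities for the UART trace level;
 * returns 0 on Lancer and on any failure.
 */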
4012u32 be_get_fw_log_level(struct be_adapter *adapter)
4013{
4014        struct be_dma_mem extfat_cmd;
4015        struct be_fat_conf_params *cfgs;
4016        int status;
4017        u32 level = 0;
4018        int j;
4019
4020        if (lancer_chip(adapter))
4021                return 0;
4022
4023        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4024        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4025        extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4026                                             &extfat_cmd.dma);
4027
4028        if (!extfat_cmd.va) {
4029                dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4030                        __func__);
4031                goto err;
4032        }
4033
4034        status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4035        if (!status) {
4036                cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4037                                                sizeof(struct be_cmd_resp_hdr));
4038                for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4039                        if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4040                                level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4041                }
4042        }
4043        pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4044                            extfat_cmd.dma);
4045err:
4046        return level;
4047}
4048
4049static int be_get_initial_config(struct be_adapter *adapter)
4050{
4051        int status;
4052        u32 level;
4053
4054        status = be_cmd_get_cntl_attributes(adapter);
4055        if (status)
4056                return status;
4057
4058        status = be_cmd_get_acpi_wol_cap(adapter);
4059        if (status) {
4060                /* in case of a failure to get WOL capabilities
4061                 * check the exclusion list to determine WOL capability */
4062                if (!be_is_wol_excluded(adapter))
4063                        adapter->wol_cap |= BE_WOL_CAP;
4064        }
4065
4066        if (be_is_wol_supported(adapter))
4067                adapter->wol = true;
4068
4069        /* Must be a power of 2 or else MODULO will BUG_ON */
4070        adapter->be_get_temp_freq = 64;
4071
4072        level = be_get_fw_log_level(adapter);
4073        adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4074
4075        return 0;
4076}
4077
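/* Lancer error recovery: wait for the chip to go ready again, rebuild the
 * adapter state via be_clear()/be_setup() and re-open the netdev if it was
 * running.
 */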
4078static int lancer_recover_func(struct be_adapter *adapter)
4079{
4080        struct device *dev = &adapter->pdev->dev;
4081        int status;
4082
4083        status = lancer_test_and_set_rdy_state(adapter);
4084        if (status)
4085                goto err;
4086
4087        if (netif_running(adapter->netdev))
4088                be_close(adapter->netdev);
4089
4090        be_clear(adapter);
4091
4092        be_clear_all_error(adapter);
4093
4094        status = be_setup(adapter);
4095        if (status)
4096                goto err;
4097
4098        if (netif_running(adapter->netdev)) {
4099                status = be_open(adapter->netdev);
4100                if (status)
4101                        goto err;
4102        }
4103
4104        dev_info(dev, "Error recovery successful\n");
4105        return 0;
4106err:
4107        if (status == -EAGAIN)
4108                dev_err(dev, "Waiting for resource provisioning\n");
4109        else
4110                dev_err(dev, "Error recovery failed\n");
4111
4112        return status;
4113}
4114
4115static void be_func_recovery_task(struct work_struct *work)
4116{
4117        struct be_adapter *adapter =
4118                container_of(work, struct be_adapter, func_recovery_work.work);
4119        int status = 0;
4120
4121        be_detect_error(adapter);
4122
4123        if (adapter->hw_error && lancer_chip(adapter)) {
4125                rtnl_lock();
4126                netif_device_detach(adapter->netdev);
4127                rtnl_unlock();
4128
4129                status = lancer_recover_func(adapter);
4130                if (!status)
4131                        netif_device_attach(adapter->netdev);
4132        }
4133
4134        /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4135         * no need to attempt further recovery.
4136         */
4137        if (!status || status == -EAGAIN)
4138                schedule_delayed_work(&adapter->func_recovery_work,
4139                                      msecs_to_jiffies(1000));
4140}
4141
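/* Periodic (every second) housekeeping: fire stats cmds, occasionally
 * sample the die temperature, replenish RX queues that ran out of buffers
 * and adapt the EQ interrupt delays.
 */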
4142static void be_worker(struct work_struct *work)
4143{
4144        struct be_adapter *adapter =
4145                container_of(work, struct be_adapter, work.work);
4146        struct be_rx_obj *rxo;
4147        struct be_eq_obj *eqo;
4148        int i;
4149
4150        /* when interrupts are not yet enabled, just reap any pending
4151         * mcc completions */
4152        if (!netif_running(adapter->netdev)) {
4153                local_bh_disable();
4154                be_process_mcc(adapter);
4155                local_bh_enable();
4156                goto reschedule;
4157        }
4158
4159        if (!adapter->stats_cmd_sent) {
4160                if (lancer_chip(adapter))
4161                        lancer_cmd_get_pport_stats(adapter,
4162                                                &adapter->stats_cmd);
4163                else
4164                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
4165        }
4166
4167        if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4168                be_cmd_get_die_temperature(adapter);
4169
4170        for_all_rx_queues(adapter, rxo, i) {
4171                if (rxo->rx_post_starved) {
4172                        rxo->rx_post_starved = false;
4173                        be_post_rx_frags(rxo, GFP_KERNEL);
4174                }
4175        }
4176
4177        for_all_evt_queues(adapter, eqo, i)
4178                be_eqd_update(adapter, eqo);
4179
4180reschedule:
4181        adapter->work_counter++;
4182        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4183}
4184
4185/* If any VFs are already enabled don't FLR the PF */
4186static bool be_reset_required(struct be_adapter *adapter)
4187{
4188        return !pci_num_vf(adapter->pdev);
4189}
4190
4191static char *mc_name(struct be_adapter *adapter)
4192{
4193        if (adapter->function_mode & FLEX10_MODE)
4194                return "FLEX10";
4195        else if (adapter->function_mode & VNIC_MODE)
4196                return "vNIC";
4197        else if (adapter->function_mode & UMC_ENABLED)
4198                return "UMC";
4199        else
4200                return "";
4201}
4202
4203static inline char *func_name(struct be_adapter *adapter)
4204{
4205        return be_physfn(adapter) ? "PF" : "VF";
4206}
4207
4208static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4209{
4210        int status = 0;
4211        struct be_adapter *adapter;
4212        struct net_device *netdev;
4213        char port_name;
4214
4215        status = pci_enable_device(pdev);
4216        if (status)
4217                goto do_none;
4218
4219        status = pci_request_regions(pdev, DRV_NAME);
4220        if (status)
4221                goto disable_dev;
4222        pci_set_master(pdev);
4223
4224        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4225        if (netdev == NULL) {
4226                status = -ENOMEM;
4227                goto rel_reg;
4228        }
4229        adapter = netdev_priv(netdev);
4230        adapter->pdev = pdev;
4231        pci_set_drvdata(pdev, adapter);
4232        adapter->netdev = netdev;
4233        SET_NETDEV_DEV(netdev, &pdev->dev);
4234
4235        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4236        if (!status) {
4237                status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4238                if (status < 0) {
4239                        dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4240                        goto free_netdev;
4241                }
4242                netdev->features |= NETIF_F_HIGHDMA;
4243        } else {
4244                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4245                if (!status)
4246                        status = dma_set_coherent_mask(&pdev->dev,
4247                                                       DMA_BIT_MASK(32));
4248                if (status) {
4249                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4250                        goto free_netdev;
4251                }
4252        }
4253
4254        status = pci_enable_pcie_error_reporting(pdev);
4255        if (status)
4256                dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4257
4258        status = be_ctrl_init(adapter);
4259        if (status)
4260                goto free_netdev;
4261
4262        /* sync up with fw's ready state */
4263        if (be_physfn(adapter)) {
4264                status = be_fw_wait_ready(adapter);
4265                if (status)
4266                        goto ctrl_clean;
4267        }
4268
4269        if (be_reset_required(adapter)) {
4270                status = be_cmd_reset_function(adapter);
4271                if (status)
4272                        goto ctrl_clean;
4273
4274                /* Wait for interrupts to quiesce after an FLR */
4275                msleep(100);
4276        }
4277
4278        /* Allow interrupts for other ULPs running on NIC function */
4279        be_intr_set(adapter, true);
4280
4281        /* tell fw we're ready to fire cmds */
4282        status = be_cmd_fw_init(adapter);
4283        if (status)
4284                goto ctrl_clean;
4285
4286        status = be_stats_init(adapter);
4287        if (status)
4288                goto ctrl_clean;
4289
4290        status = be_get_initial_config(adapter);
4291        if (status)
4292                goto stats_clean;
4293
4294        INIT_DELAYED_WORK(&adapter->work, be_worker);
4295        INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4296        adapter->rx_fc = adapter->tx_fc = true;
4297
4298        status = be_setup(adapter);
4299        if (status)
4300                goto stats_clean;
4301
4302        be_netdev_init(netdev);
4303        status = register_netdev(netdev);
4304        if (status != 0)
4305                goto unsetup;
4306
4307        be_roce_dev_add(adapter);
4308
4309        schedule_delayed_work(&adapter->func_recovery_work,
4310                              msecs_to_jiffies(1000));
4311
4312        be_cmd_query_port_name(adapter, &port_name);
4313
4314        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4315                 func_name(adapter), mc_name(adapter), port_name);
4316
4317        return 0;
4318
4319unsetup:
4320        be_clear(adapter);
4321stats_clean:
4322        be_stats_cleanup(adapter);
4323ctrl_clean:
4324        be_ctrl_cleanup(adapter);
4325free_netdev:
4326        free_netdev(netdev);
4327        pci_set_drvdata(pdev, NULL);
4328rel_reg:
4329        pci_release_regions(pdev);
4330disable_dev:
4331        pci_disable_device(pdev);
4332do_none:
4333        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4334        return status;
4335}
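
/* A minimal sketch (not part of this driver) of the cascading-goto unwind
 * that be_probe() above relies on: each failing step jumps to the label
 * that releases everything acquired so far, so teardown happens in exact
 * reverse order of acquisition. example_probe() and the "example" region
 * name are hypothetical.
 */
static int __maybe_unused example_probe(struct pci_dev *pdev)
{
        int status;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;                   /* nothing to undo yet */

        status = pci_request_regions(pdev, "example");
        if (status)
                goto disable_dev;               /* undo pci_enable_device() */

        /* ... further setup steps, each jumping one label up on failure ... */
        return 0;

disable_dev:
        pci_disable_device(pdev);
do_none:
        return status;
}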
4336
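/* Legacy PCI power-management entry points. The PM core passes the target
 * system sleep state in 'state'; pci_choose_state() maps it to a device
 * power state (typically D3hot) before the device is powered down.
 */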
4337static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4338{
4339        struct be_adapter *adapter = pci_get_drvdata(pdev);
4340        struct net_device *netdev = adapter->netdev;
4341
4342        if (adapter->wol)
4343                be_setup_wol(adapter, true);
4344
4345        cancel_delayed_work_sync(&adapter->func_recovery_work);
4346
4347        netif_device_detach(netdev);
4348        if (netif_running(netdev)) {
4349                rtnl_lock();
4350                be_close(netdev);
4351                rtnl_unlock();
4352        }
4353        be_clear(adapter);
4354
4355        pci_save_state(pdev);
4356        pci_disable_device(pdev);
4357        pci_set_power_state(pdev, pci_choose_state(pdev, state));
4358        return 0;
4359}
4360
4361static int be_resume(struct pci_dev *pdev)
4362{
4363        int status = 0;
4364        struct be_adapter *adapter = pci_get_drvdata(pdev);
4365        struct net_device *netdev = adapter->netdev;
4366
4367        netif_device_detach(netdev);
4368
4369        status = pci_enable_device(pdev);
4370        if (status)
4371                return status;
4372
4373        pci_set_power_state(pdev, PCI_D0);
4374        pci_restore_state(pdev);
4375
4376        status = be_fw_wait_ready(adapter);
4377        if (status)
4378                return status;
4379
4380        /* tell fw we're ready to fire cmds */
4381        status = be_cmd_fw_init(adapter);
4382        if (status)
4383                return status;
4384
4385        be_setup(adapter);
4386        if (netif_running(netdev)) {
4387                rtnl_lock();
4388                be_open(netdev);
4389                rtnl_unlock();
4390        }
4391
4392        schedule_delayed_work(&adapter->func_recovery_work,
4393                              msecs_to_jiffies(1000));
4394        netif_device_attach(netdev);
4395
4396        if (adapter->wol)
4397                be_setup_wol(adapter, false);
4398
4399        return 0;
4400}
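
/* A hypothetical sketch: modern drivers express the pair above through
 * dev_pm_ops instead of the legacy pci_driver.suspend/.resume hooks used
 * here. example_suspend()/example_resume() merely stand in for
 * be_suspend()/be_resume() and are not part of this driver.
 */
static int __maybe_unused example_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        /* quiesce the device as be_suspend() does; in this model the PCI
         * core saves/restores config space and sets the power state itself
         */
        pci_disable_device(pdev);
        return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
        return pci_enable_device(to_pci_dev(dev));
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
/* wired up via  .driver.pm = &example_pm_ops  in the pci_driver definition */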
4401
4402/* A function reset (issued below via be_cmd_reset_function()) stops BE
4403 * from DMAing any data, so a shutdown or kexec does not leave DMA in
4404 * flight into memory that the next kernel may reuse. */
4405static void be_shutdown(struct pci_dev *pdev)
4406{
4407        struct be_adapter *adapter = pci_get_drvdata(pdev);
4408
4409        if (!adapter)
4410                return;
4411
4412        cancel_delayed_work_sync(&adapter->work);
4413        cancel_delayed_work_sync(&adapter->func_recovery_work);
4414
4415        netif_device_detach(adapter->netdev);
4416
4417        be_cmd_reset_function(adapter);
4418
4419        pci_disable_device(pdev);
4420}
4421
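/* EEH/AER recovery flow: the PCI core calls .error_detected() when the
 * platform isolates the device, .slot_reset() after the link has been
 * reset, and .resume() once normal traffic may restart. Returning
 * PCI_ERS_RESULT_NEED_RESET from .error_detected() requests the slot
 * reset; PCI_ERS_RESULT_DISCONNECT declares the device unrecoverable.
 */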
4422static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4423                                pci_channel_state_t state)
4424{
4425        struct be_adapter *adapter = pci_get_drvdata(pdev);
4426        struct net_device *netdev = adapter->netdev;
4427
4428        dev_err(&adapter->pdev->dev, "EEH error detected\n");
4429
4430        if (!adapter->eeh_error) {
4431                adapter->eeh_error = true;
4432
4433                cancel_delayed_work_sync(&adapter->func_recovery_work);
4434
4435                rtnl_lock();
4436                netif_device_detach(netdev);
4437                if (netif_running(netdev))
4438                        be_close(netdev);
4439                rtnl_unlock();
4440
4441                be_clear(adapter);
4442        }
4443
4444        if (state == pci_channel_io_perm_failure)
4445                return PCI_ERS_RESULT_DISCONNECT;
4446
4447        pci_disable_device(pdev);
4448
4449        /* The error could cause the FW to trigger a flash debug dump.
4450         * Resetting the card while flash dump is in progress
4451         * can cause it not to recover; wait for it to finish.
4452         * Wait only for first function as it is needed only once per
4453         * adapter.
4454         */
4455        if (pdev->devfn == 0)
4456                ssleep(30);
4457
4458        return PCI_ERS_RESULT_NEED_RESET;
4459}
4460
4461static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4462{
4463        struct be_adapter *adapter = pci_get_drvdata(pdev);
4464        int status;
4465
4466        dev_info(&adapter->pdev->dev, "EEH reset\n");
4467
4468        status = pci_enable_device(pdev);
4469        if (status)
4470                return PCI_ERS_RESULT_DISCONNECT;
4471
4472        pci_set_master(pdev);
4473        pci_set_power_state(pdev, PCI_D0);
4474        pci_restore_state(pdev);
4475
4476        /* Check if card is ok and fw is ready */
4477        dev_info(&adapter->pdev->dev,
4478                 "Waiting for FW to be ready after EEH reset\n");
4479        status = be_fw_wait_ready(adapter);
4480        if (status)
4481                return PCI_ERS_RESULT_DISCONNECT;
4482
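        /* Clear the logged AER uncorrectable-error status so the stale
         * error is not reported again once the device is back in service.
         */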
4483        pci_cleanup_aer_uncorrect_error_status(pdev);
4484        be_clear_all_error(adapter);
4485        return PCI_ERS_RESULT_RECOVERED;
4486}
4487
4488static void be_eeh_resume(struct pci_dev *pdev)
4489{
4490        int status = 0;
4491        struct be_adapter *adapter = pci_get_drvdata(pdev);
4492        struct net_device *netdev = adapter->netdev;
4493
4494        dev_info(&adapter->pdev->dev, "EEH resume\n");
4495
4496        pci_save_state(pdev);
4497
4498        status = be_cmd_reset_function(adapter);
4499        if (status)
4500                goto err;
4501
4502        /* tell fw we're ready to fire cmds */
4503        status = be_cmd_fw_init(adapter);
4504        if (status)
4505                goto err;
4506
4507        status = be_setup(adapter);
4508        if (status)
4509                goto err;
4510
4511        if (netif_running(netdev)) {
4512                status = be_open(netdev);
4513                if (status)
4514                        goto err;
4515        }
4516
4517        schedule_delayed_work(&adapter->func_recovery_work,
4518                              msecs_to_jiffies(1000));
4519        netif_device_attach(netdev);
4520        return;
4521err:
4522        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4523}
4524
4525static const struct pci_error_handlers be_eeh_handlers = {
4526        .error_detected = be_eeh_err_detected,
4527        .slot_reset = be_eeh_reset,
4528        .resume = be_eeh_resume,
4529};
4530
4531static struct pci_driver be_driver = {
4532        .name = DRV_NAME,
4533        .id_table = be_dev_ids,
4534        .probe = be_probe,
4535        .remove = be_remove,
4536        .suspend = be_suspend,
4537        .resume = be_resume,
4538        .shutdown = be_shutdown,
4539        .err_handler = &be_eeh_handlers,
4540};
4541
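/* Note: a driver with no extra work at load time could replace the
 * init/exit boilerplate below with module_pci_driver(be_driver); this
 * driver cannot, because it also validates rx_frag_size on load.
 */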
4542static int __init be_init_module(void)
4543{
4544        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4545            rx_frag_size != 2048) {
4546                pr_warn(DRV_NAME
4547                        " : Module param rx_frag_size must be 2048/4096/8192."
4548                        " Using 2048\n");
4549                rx_frag_size = 2048;
4550        }
4551
4552        return pci_register_driver(&be_driver);
4553}
4554module_init(be_init_module);
4555
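/* A hypothetical equivalent of the rx_frag_size check in be_init_module()
 * above: the three legal values are exactly the powers of two from 2048 to
 * 8192, so the test could also use is_power_of_2() from <linux/log2.h>:
 *
 *      if (!is_power_of_2(rx_frag_size) ||
 *          rx_frag_size < 2048 || rx_frag_size > 8192)
 *              rx_frag_size = 2048;
 */
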
4556static void __exit be_exit_module(void)
4557{
4558        pci_unregister_driver(&be_driver);
4559}
4560module_exit(be_exit_module);
4561