/* linux/drivers/net/ethernet/broadcom/bnx2.c */
/* bnx2.c: QLogic bnx2 network driver.
 *
 * Copyright (c) 2004-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/module.h>
  16#include <linux/moduleparam.h>
  17
  18#include <linux/stringify.h>
  19#include <linux/kernel.h>
  20#include <linux/timer.h>
  21#include <linux/errno.h>
  22#include <linux/ioport.h>
  23#include <linux/slab.h>
  24#include <linux/vmalloc.h>
  25#include <linux/interrupt.h>
  26#include <linux/pci.h>
  27#include <linux/netdevice.h>
  28#include <linux/etherdevice.h>
  29#include <linux/skbuff.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/bitops.h>
  32#include <asm/io.h>
  33#include <asm/irq.h>
  34#include <linux/delay.h>
  35#include <asm/byteorder.h>
  36#include <asm/page.h>
  37#include <linux/time.h>
  38#include <linux/ethtool.h>
  39#include <linux/mii.h>
  40#include <linux/if.h>
  41#include <linux/if_vlan.h>
  42#include <net/ip.h>
  43#include <net/tcp.h>
  44#include <net/checksum.h>
  45#include <linux/workqueue.h>
  46#include <linux/crc32.h>
  47#include <linux/prefetch.h>
  48#include <linux/cache.h>
  49#include <linux/firmware.h>
  50#include <linux/log2.h>
  51#include <linux/aer.h>
  52#include <linux/crash_dump.h>
  53
  54#if IS_ENABLED(CONFIG_CNIC)
  55#define BCM_CNIC 1
  56#include "cnic_if.h"
  57#endif
  58#include "bnx2.h"
  59#include "bnx2_fw.h"
  60
  61#define DRV_MODULE_NAME         "bnx2"
  62#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.3.fw"
  63#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
  64#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1b.fw"
  65#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
  66#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"
  67
  68#define RUN_AT(x) (jiffies + (x))
  69
  70/* Time in jiffies before concluding the transmitter is hung. */
  71#define TX_TIMEOUT  (5*HZ)
  72
  73MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
  74MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
  75MODULE_LICENSE("GPL");
  76MODULE_FIRMWARE(FW_MIPS_FILE_06);
  77MODULE_FIRMWARE(FW_RV2P_FILE_06);
  78MODULE_FIRMWARE(FW_MIPS_FILE_09);
  79MODULE_FIRMWARE(FW_RV2P_FILE_09);
  80MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
  81
  82static int disable_msi = 0;
  83
  84module_param(disable_msi, int, 0444);
  85MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
  86
/* Board identifiers, used as the driver_data index into board_info[]
 * via bnx2_pci_tbl below.  Keep the order in sync with board_info[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
 100
/* indexed by board_t, above */
/* Marketing names for each supported board; printed at probe time. */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
 117
/* PCI IDs this driver binds to.  HP OEM boards (subvendor HP) are
 * listed before the catch-all PCI_ANY_ID entries so they match first.
 * The final column is the board_t index.  0x163b/0x163c are the 5716
 * copper/SerDes device IDs (no PCI_DEVICE_ID_* macro exists for them).
 */
static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
 143
/* NVRAM device table for pre-5709 chips.
 *
 * Each entry ends with the device geometry (page bits, page size,
 * byte-address mask, total size) and a printable name.
 * NOTE(review): the leading five hex words look like strap-match and
 * NVRAM configuration/command values -- confirm against the
 * struct flash_spec definition in bnx2.h before documenting further.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
 232
/* 5709-family chips use a fixed NVRAM layout instead of the
 * strap-matched flash_table[] above.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
 241
 242MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 243
 244static void bnx2_init_napi(struct bnx2 *bp);
 245static void bnx2_del_napi(struct bnx2 *bp);
 246
 247static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 248{
 249        u32 diff;
 250
 251        /* The ring uses 256 indices for 255 entries, one of them
 252         * needs to be skipped.
 253         */
 254        diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
 255        if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
 256                diff &= 0xffff;
 257                if (diff == BNX2_TX_DESC_CNT)
 258                        diff = BNX2_MAX_TX_DESC_CNT;
 259        }
 260        return bp->tx_ring_size - diff;
 261}
 262
 263static u32
 264bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
 265{
 266        unsigned long flags;
 267        u32 val;
 268
 269        spin_lock_irqsave(&bp->indirect_lock, flags);
 270        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
 271        val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
 272        spin_unlock_irqrestore(&bp->indirect_lock, flags);
 273        return val;
 274}
 275
 276static void
 277bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
 278{
 279        unsigned long flags;
 280
 281        spin_lock_irqsave(&bp->indirect_lock, flags);
 282        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
 283        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
 284        spin_unlock_irqrestore(&bp->indirect_lock, flags);
 285}
 286
/* Write a word into the firmware shared-memory region (offset is
 * relative to shmem_base).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
 292
/* Read a word from the firmware shared-memory region (offset is
 * relative to shmem_base).
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
 298
 299static void
 300bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 301{
 302        unsigned long flags;
 303
 304        offset += cid_addr;
 305        spin_lock_irqsave(&bp->indirect_lock, flags);
 306        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
 307                int i;
 308
 309                BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
 310                BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
 311                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
 312                for (i = 0; i < 5; i++) {
 313                        val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
 314                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
 315                                break;
 316                        udelay(5);
 317                }
 318        } else {
 319                BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
 320                BNX2_WR(bp, BNX2_CTX_DATA, val);
 321        }
 322        spin_unlock_irqrestore(&bp->indirect_lock, flags);
 323}
 324
 325#ifdef BCM_CNIC
 326static int
 327bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
 328{
 329        struct bnx2 *bp = netdev_priv(dev);
 330        struct drv_ctl_io *io = &info->data.io;
 331
 332        switch (info->cmd) {
 333        case DRV_CTL_IO_WR_CMD:
 334                bnx2_reg_wr_ind(bp, io->offset, io->data);
 335                break;
 336        case DRV_CTL_IO_RD_CMD:
 337                io->data = bnx2_reg_rd_ind(bp, io->offset);
 338                break;
 339        case DRV_CTL_CTX_WR_CMD:
 340                bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
 341                break;
 342        default:
 343                return -EINVAL;
 344        }
 345        return 0;
 346}
 347
 348static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
 349{
 350        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 351        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 352        int sb_id;
 353
 354        if (bp->flags & BNX2_FLAG_USING_MSIX) {
 355                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
 356                bnapi->cnic_present = 0;
 357                sb_id = bp->irq_nvecs;
 358                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
 359        } else {
 360                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
 361                bnapi->cnic_tag = bnapi->last_status_idx;
 362                bnapi->cnic_present = 1;
 363                sb_id = 0;
 364                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
 365        }
 366
 367        cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
 368        cp->irq_arr[0].status_blk = (void *)
 369                ((unsigned long) bnapi->status_blk.msi +
 370                (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
 371        cp->irq_arr[0].status_blk_num = sb_id;
 372        cp->num_irq = 1;
 373}
 374
 375static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 376                              void *data)
 377{
 378        struct bnx2 *bp = netdev_priv(dev);
 379        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 380
 381        if (!ops)
 382                return -EINVAL;
 383
 384        if (cp->drv_state & CNIC_DRV_STATE_REGD)
 385                return -EBUSY;
 386
 387        if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
 388                return -ENODEV;
 389
 390        bp->cnic_data = data;
 391        rcu_assign_pointer(bp->cnic_ops, ops);
 392
 393        cp->num_irq = 0;
 394        cp->drv_state = CNIC_DRV_STATE_REGD;
 395
 396        bnx2_setup_cnic_irq_info(bp);
 397
 398        return 0;
 399}
 400
 401static int bnx2_unregister_cnic(struct net_device *dev)
 402{
 403        struct bnx2 *bp = netdev_priv(dev);
 404        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 405        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 406
 407        mutex_lock(&bp->cnic_lock);
 408        cp->drv_state = 0;
 409        bnapi->cnic_present = 0;
 410        RCU_INIT_POINTER(bp->cnic_ops, NULL);
 411        mutex_unlock(&bp->cnic_lock);
 412        synchronize_rcu();
 413        return 0;
 414}
 415
 416static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
 417{
 418        struct bnx2 *bp = netdev_priv(dev);
 419        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 420
 421        if (!cp->max_iscsi_conn)
 422                return NULL;
 423
 424        cp->drv_owner = THIS_MODULE;
 425        cp->chip_id = bp->chip_id;
 426        cp->pdev = bp->pdev;
 427        cp->io_base = bp->regview;
 428        cp->drv_ctl = bnx2_drv_ctl;
 429        cp->drv_register_cnic = bnx2_register_cnic;
 430        cp->drv_unregister_cnic = bnx2_unregister_cnic;
 431
 432        return cp;
 433}
 434
 435static void
 436bnx2_cnic_stop(struct bnx2 *bp)
 437{
 438        struct cnic_ops *c_ops;
 439        struct cnic_ctl_info info;
 440
 441        mutex_lock(&bp->cnic_lock);
 442        c_ops = rcu_dereference_protected(bp->cnic_ops,
 443                                          lockdep_is_held(&bp->cnic_lock));
 444        if (c_ops) {
 445                info.cmd = CNIC_CTL_STOP_CMD;
 446                c_ops->cnic_ctl(bp->cnic_data, &info);
 447        }
 448        mutex_unlock(&bp->cnic_lock);
 449}
 450
 451static void
 452bnx2_cnic_start(struct bnx2 *bp)
 453{
 454        struct cnic_ops *c_ops;
 455        struct cnic_ctl_info info;
 456
 457        mutex_lock(&bp->cnic_lock);
 458        c_ops = rcu_dereference_protected(bp->cnic_ops,
 459                                          lockdep_is_held(&bp->cnic_lock));
 460        if (c_ops) {
 461                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 462                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 463
 464                        bnapi->cnic_tag = bnapi->last_status_idx;
 465                }
 466                info.cmd = CNIC_CTL_START_CMD;
 467                c_ops->cnic_ctl(bp->cnic_data, &info);
 468        }
 469        mutex_unlock(&bp->cnic_lock);
 470}
 471
 472#else
 473
/* CNIC support not compiled in: no-op. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
 478
/* CNIC support not compiled in: no-op. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
 483
 484#endif
 485
 486static int
 487bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 488{
 489        u32 val1;
 490        int i, ret;
 491
 492        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 493                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 494                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 495
 496                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 497                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 498
 499                udelay(40);
 500        }
 501
 502        val1 = (bp->phy_addr << 21) | (reg << 16) |
 503                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
 504                BNX2_EMAC_MDIO_COMM_START_BUSY;
 505        BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
 506
 507        for (i = 0; i < 50; i++) {
 508                udelay(10);
 509
 510                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
 511                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
 512                        udelay(5);
 513
 514                        val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
 515                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;
 516
 517                        break;
 518                }
 519        }
 520
 521        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
 522                *val = 0x0;
 523                ret = -EBUSY;
 524        }
 525        else {
 526                *val = val1;
 527                ret = 0;
 528        }
 529
 530        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 531                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 532                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 533
 534                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 535                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 536
 537                udelay(40);
 538        }
 539
 540        return ret;
 541}
 542
 543static int
 544bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
 545{
 546        u32 val1;
 547        int i, ret;
 548
 549        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 550                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 551                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 552
 553                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 554                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 555
 556                udelay(40);
 557        }
 558
 559        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
 560                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
 561                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
 562        BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
 563
 564        for (i = 0; i < 50; i++) {
 565                udelay(10);
 566
 567                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
 568                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
 569                        udelay(5);
 570                        break;
 571                }
 572        }
 573
 574        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
 575                ret = -EBUSY;
 576        else
 577                ret = 0;
 578
 579        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 580                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 581                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 582
 583                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 584                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 585
 586                udelay(40);
 587        }
 588
 589        return ret;
 590}
 591
 592static void
 593bnx2_disable_int(struct bnx2 *bp)
 594{
 595        int i;
 596        struct bnx2_napi *bnapi;
 597
 598        for (i = 0; i < bp->irq_nvecs; i++) {
 599                bnapi = &bp->bnx2_napi[i];
 600                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 601                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
 602        }
 603        BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
 604}
 605
 606static void
 607bnx2_enable_int(struct bnx2 *bp)
 608{
 609        int i;
 610        struct bnx2_napi *bnapi;
 611
 612        for (i = 0; i < bp->irq_nvecs; i++) {
 613                bnapi = &bp->bnx2_napi[i];
 614
 615                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 616                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 617                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
 618                        bnapi->last_status_idx);
 619
 620                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 621                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 622                        bnapi->last_status_idx);
 623        }
 624        BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
 625}
 626
 627static void
 628bnx2_disable_int_sync(struct bnx2 *bp)
 629{
 630        int i;
 631
 632        atomic_inc(&bp->intr_sem);
 633        if (!netif_running(bp->dev))
 634                return;
 635
 636        bnx2_disable_int(bp);
 637        for (i = 0; i < bp->irq_nvecs; i++)
 638                synchronize_irq(bp->irq_tbl[i].vector);
 639}
 640
 641static void
 642bnx2_napi_disable(struct bnx2 *bp)
 643{
 644        int i;
 645
 646        for (i = 0; i < bp->irq_nvecs; i++)
 647                napi_disable(&bp->bnx2_napi[i].napi);
 648}
 649
 650static void
 651bnx2_napi_enable(struct bnx2 *bp)
 652{
 653        int i;
 654
 655        for (i = 0; i < bp->irq_nvecs; i++)
 656                napi_enable(&bp->bnx2_napi[i].napi);
 657}
 658
 659static void
 660bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
 661{
 662        if (stop_cnic)
 663                bnx2_cnic_stop(bp);
 664        if (netif_running(bp->dev)) {
 665                bnx2_napi_disable(bp);
 666                netif_tx_disable(bp->dev);
 667        }
 668        bnx2_disable_int_sync(bp);
 669        netif_carrier_off(bp->dev);     /* prevent tx timeout */
 670}
 671
 672static void
 673bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
 674{
 675        if (atomic_dec_and_test(&bp->intr_sem)) {
 676                if (netif_running(bp->dev)) {
 677                        netif_tx_wake_all_queues(bp->dev);
 678                        spin_lock_bh(&bp->phy_lock);
 679                        if (bp->link_up)
 680                                netif_carrier_on(bp->dev);
 681                        spin_unlock_bh(&bp->phy_lock);
 682                        bnx2_napi_enable(bp);
 683                        bnx2_enable_int(bp);
 684                        if (start_cnic)
 685                                bnx2_cnic_start(bp);
 686                }
 687        }
 688}
 689
 690static void
 691bnx2_free_tx_mem(struct bnx2 *bp)
 692{
 693        int i;
 694
 695        for (i = 0; i < bp->num_tx_rings; i++) {
 696                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 697                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 698
 699                if (txr->tx_desc_ring) {
 700                        dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
 701                                          txr->tx_desc_ring,
 702                                          txr->tx_desc_mapping);
 703                        txr->tx_desc_ring = NULL;
 704                }
 705                kfree(txr->tx_buf_ring);
 706                txr->tx_buf_ring = NULL;
 707        }
 708}
 709
 710static void
 711bnx2_free_rx_mem(struct bnx2 *bp)
 712{
 713        int i;
 714
 715        for (i = 0; i < bp->num_rx_rings; i++) {
 716                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 717                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 718                int j;
 719
 720                for (j = 0; j < bp->rx_max_ring; j++) {
 721                        if (rxr->rx_desc_ring[j])
 722                                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
 723                                                  rxr->rx_desc_ring[j],
 724                                                  rxr->rx_desc_mapping[j]);
 725                        rxr->rx_desc_ring[j] = NULL;
 726                }
 727                vfree(rxr->rx_buf_ring);
 728                rxr->rx_buf_ring = NULL;
 729
 730                for (j = 0; j < bp->rx_max_pg_ring; j++) {
 731                        if (rxr->rx_pg_desc_ring[j])
 732                                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
 733                                                  rxr->rx_pg_desc_ring[j],
 734                                                  rxr->rx_pg_desc_mapping[j]);
 735                        rxr->rx_pg_desc_ring[j] = NULL;
 736                }
 737                vfree(rxr->rx_pg_ring);
 738                rxr->rx_pg_ring = NULL;
 739        }
 740}
 741
 742static int
 743bnx2_alloc_tx_mem(struct bnx2 *bp)
 744{
 745        int i;
 746
 747        for (i = 0; i < bp->num_tx_rings; i++) {
 748                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 749                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 750
 751                txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
 752                if (!txr->tx_buf_ring)
 753                        return -ENOMEM;
 754
 755                txr->tx_desc_ring =
 756                        dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
 757                                           &txr->tx_desc_mapping, GFP_KERNEL);
 758                if (!txr->tx_desc_ring)
 759                        return -ENOMEM;
 760        }
 761        return 0;
 762}
 763
 764static int
 765bnx2_alloc_rx_mem(struct bnx2 *bp)
 766{
 767        int i;
 768
 769        for (i = 0; i < bp->num_rx_rings; i++) {
 770                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 771                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 772                int j;
 773
 774                rxr->rx_buf_ring =
 775                        vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
 776                if (!rxr->rx_buf_ring)
 777                        return -ENOMEM;
 778
 779                for (j = 0; j < bp->rx_max_ring; j++) {
 780                        rxr->rx_desc_ring[j] =
 781                                dma_alloc_coherent(&bp->pdev->dev,
 782                                                   RXBD_RING_SIZE,
 783                                                   &rxr->rx_desc_mapping[j],
 784                                                   GFP_KERNEL);
 785                        if (!rxr->rx_desc_ring[j])
 786                                return -ENOMEM;
 787
 788                }
 789
 790                if (bp->rx_pg_ring_size) {
 791                        rxr->rx_pg_ring =
 792                                vzalloc(array_size(SW_RXPG_RING_SIZE,
 793                                                   bp->rx_max_pg_ring));
 794                        if (!rxr->rx_pg_ring)
 795                                return -ENOMEM;
 796
 797                }
 798
 799                for (j = 0; j < bp->rx_max_pg_ring; j++) {
 800                        rxr->rx_pg_desc_ring[j] =
 801                                dma_alloc_coherent(&bp->pdev->dev,
 802                                                   RXBD_RING_SIZE,
 803                                                   &rxr->rx_pg_desc_mapping[j],
 804                                                   GFP_KERNEL);
 805                        if (!rxr->rx_pg_desc_ring[j])
 806                                return -ENOMEM;
 807
 808                }
 809        }
 810        return 0;
 811}
 812
 813static void
 814bnx2_free_stats_blk(struct net_device *dev)
 815{
 816        struct bnx2 *bp = netdev_priv(dev);
 817
 818        if (bp->status_blk) {
 819                dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
 820                                  bp->status_blk,
 821                                  bp->status_blk_mapping);
 822                bp->status_blk = NULL;
 823                bp->stats_blk = NULL;
 824        }
 825}
 826
 827static int
 828bnx2_alloc_stats_blk(struct net_device *dev)
 829{
 830        int status_blk_size;
 831        void *status_blk;
 832        struct bnx2 *bp = netdev_priv(dev);
 833
 834        /* Combine status and statistics blocks into one allocation. */
 835        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
 836        if (bp->flags & BNX2_FLAG_MSIX_CAP)
 837                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
 838                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
 839        bp->status_stats_size = status_blk_size +
 840                                sizeof(struct statistics_block);
 841        status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
 842                                        &bp->status_blk_mapping, GFP_KERNEL);
 843        if (!status_blk)
 844                return -ENOMEM;
 845
 846        bp->status_blk = status_blk;
 847        bp->stats_blk = status_blk + status_blk_size;
 848        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
 849
 850        return 0;
 851}
 852
 853static void
 854bnx2_free_mem(struct bnx2 *bp)
 855{
 856        int i;
 857        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 858
 859        bnx2_free_tx_mem(bp);
 860        bnx2_free_rx_mem(bp);
 861
 862        for (i = 0; i < bp->ctx_pages; i++) {
 863                if (bp->ctx_blk[i]) {
 864                        dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
 865                                          bp->ctx_blk[i],
 866                                          bp->ctx_blk_mapping[i]);
 867                        bp->ctx_blk[i] = NULL;
 868                }
 869        }
 870
 871        if (bnapi->status_blk.msi)
 872                bnapi->status_blk.msi = NULL;
 873}
 874
/* Wire up per-vector status block pointers and allocate the remaining
 * DMA memory: 5709 context pages plus the RX and TX rings.  On any
 * allocation failure, everything acquired so far is released through
 * bnx2_free_mem() and -ENOMEM is returned; returns 0 on success.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, err;
	struct bnx2_napi *bnapi;

	/* Vector 0 uses the base status block allocated earlier by
	 * bnx2_alloc_stats_blk().
	 */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Each additional MSI-X vector gets its own
			 * aligned slice of the status block buffer.
			 */
			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* NOTE(review): vector number appears to be encoded
			 * in bits 31:24 of int_num - confirm against the
			 * interrupt register layout.
			 */
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 needs 8KB (0x2000) of host context memory, split
		 * into BNX2_PAGE_SIZE chunks - at least one page.
		 */
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (!bp->ctx_blk[i])
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
 931
/* Report the resolved link state (speed/duplex, link up/down, autoneg
 * progress) to the bootcode via the BNX2_LINK_STATUS shared-memory
 * word.  Skipped entirely when the PHY is managed remotely by
 * firmware.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR link/autoneg bits are latched per the MII
			 * spec; read twice to get the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			/* Autoneg incomplete (or a parallel-detected link)
			 * is reported as parallel detection.
			 */
			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
 990
 991static char *
 992bnx2_xceiver_str(struct bnx2 *bp)
 993{
 994        return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
 995                ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
 996                 "Copper");
 997}
 998
/* Log a link state change and update the net-device carrier.  On link
 * up, prints speed/duplex and the negotiated flow control direction
 * using pr_cont() continuations of the netdev_info() line; on link
 * down, logs an error.  Finally mirrors the state to the bootcode via
 * bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1029
/* Derive bp->flow_ctrl (FLOW_CTRL_TX/RX bits) for the current link.
 *
 * If speed or flow-control autoneg is disabled, the user-requested
 * setting is applied directly (full duplex only).  Otherwise the
 * result is resolved from the local and link-partner pause
 * advertisements; the 5708 SerDes reads the already-resolved state
 * straight from the PHY instead.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		/* 5708 SerDes exposes the resolved pause state directly. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		/* Map the 1000BASE-X pause bits onto the copper-style
		 * PAUSE_CAP/PAUSE_ASYM bits so one resolution table below
		 * handles both media types.
		 */
		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1105
/* Record link-up for the 5709 SerDes PHY and read the resolved
 * speed/duplex from its GP_STATUS register block.  With speed autoneg
 * disabled, the forced (requested) settings are used instead.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Select the GP_STATUS block, sample TOP_AN_STATUS1, then
	 * restore the default COMBO_IEEEB0 block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1144
/* Record link-up for the 5708 SerDes PHY and decode the resolved
 * speed/duplex from the 1000X_STAT1 register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
1173
1174static int
1175bnx2_5706s_linkup(struct bnx2 *bp)
1176{
1177        u32 bmcr, local_adv, remote_adv, common;
1178
1179        bp->link_up = 1;
1180        bp->line_speed = SPEED_1000;
1181
1182        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1183        if (bmcr & BMCR_FULLDPLX) {
1184                bp->duplex = DUPLEX_FULL;
1185        }
1186        else {
1187                bp->duplex = DUPLEX_HALF;
1188        }
1189
1190        if (!(bmcr & BMCR_ANENABLE)) {
1191                return 0;
1192        }
1193
1194        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1195        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1196
1197        common = local_adv & remote_adv;
1198        if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1199
1200                if (common & ADVERTISE_1000XFULL) {
1201                        bp->duplex = DUPLEX_FULL;
1202                }
1203                else {
1204                        bp->duplex = DUPLEX_HALF;
1205                }
1206        }
1207
1208        return 0;
1209}
1210
/* Determine line speed and duplex for a copper PHY that just linked
 * up.  With autoneg enabled, the common (local & partner) abilities
 * decide - 1000 Mbps first, then 100/10; no common ability marks the
 * link down.  With autoneg disabled, the forced BMCR speed/duplex
 * bits are used.  Also latches the MDI-X status into phy_flags.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Gigabit abilities: partner's STAT1000 bits are shifted
		 * down by 2 to line up with our CTRL1000 advertisement.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match - fall back to 10/100
			 * resolution from ADV/LPA.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode BMCR directly. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}
1286
/* Initialize the L2 context for one rx ring: program the BD-chain
 * context type/size and, when tx pause is active, enable hardware
 * flow control for the ring.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): 0x02 << 8 is an unnamed context field value -
	 * confirm its meaning against the L2 context layout docs.
	 */
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1301
1302static void
1303bnx2_init_all_rx_contexts(struct bnx2 *bp)
1304{
1305        int i;
1306        u32 cid;
1307
1308        for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1309                if (i == 1)
1310                        cid = RX_RSS_CID;
1311                bnx2_init_rx_context(bp, cid);
1312        }
1313}
1314
/* Program the EMAC to match the current link state: inter-frame gap
 * settings, port mode (MII/GMII/2.5G), duplex, and rx/tx pause
 * enables.  Acknowledges the link-change interrupt and refreshes the
 * rx contexts so their flow-control enable tracks bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620 / 0x26ff are EMAC_TX_LENGTHS (IFG/slot
	 * time) values; the larger value is used only for 1000HD -
	 * confirm exact field meanings against the register spec.
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M port mode and
				 * falls through to plain MII.
				 */
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				fallthrough;
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G flag. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				fallthrough;
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* Propagate the new flow-control state into the rx contexts. */
	bnx2_init_all_rx_contexts(bp);
}
1381
1382static void
1383bnx2_enable_bmsr1(struct bnx2 *bp)
1384{
1385        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1386            (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1387                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388                               MII_BNX2_BLK_ADDR_GP_STATUS);
1389}
1390
1391static void
1392bnx2_disable_bmsr1(struct bnx2 *bp)
1393{
1394        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1395            (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1396                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1397                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1398}
1399
/* Ensure the 2.5G advertisement bit (UP1_2G5) is set on a 2.5G-capable
 * SerDes PHY, also adding 2500baseX to bp->advertising when speed
 * autoneg is on.  Returns nonzero if 2.5G was already enabled, 0 if it
 * had to be turned on here or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register lives in the OVER1G block; select it
	 * first and restore COMBO_IEEEB0 afterwards.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1428
/* Clear the 2.5G advertisement bit (UP1_2G5) on a 2.5G-capable SerDes
 * PHY.  Returns 1 if the bit was set and has been cleared here, 0 if
 * it was already clear or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the UP1 register lives in the OVER1G block; select it
	 * first and restore COMBO_IEEEB0 afterwards.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1454
/* Force the SerDes PHY to 2.5G.  On 5709 this sets the FORCE/FORCE_2_5G
 * bits in the SERDES_DIG MISC1 register; on 5708 it sets the
 * chip-specific BMCR force bit.  Autoneg is then disabled in BMCR and
 * the requested duplex applied.  No-op for non-2.5G-capable PHYs and
 * other chips, or if the BMCR read fails.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Read-modify-write MISC1 inside the SERDES_DIG block,
		 * then restore the default COMBO_IEEEB0 block.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Forcing a speed is incompatible with autoneg; clear it and
	 * honor the requested duplex.
	 */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1498
/* Undo bnx2_enable_forced_2g5(): clear the 2.5G force bits (5709
 * SERDES_DIG MISC1 or 5708 BMCR) and, when speed autoneg is on,
 * restart autoneg at 1000 Mbps.  No-op for non-2.5G-capable PHYs and
 * other chips, or if the BMCR read fails.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Read-modify-write MISC1 inside the SERDES_DIG block,
		 * then restore the default COMBO_IEEEB0 block.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1537
/* Toggle the 5706 SerDes forced-link-down state via the
 * MII_EXPAND_SERDES_CTL expansion register (read-modify-write through
 * the DSP address/data port pair).
 * NOTE(review): the 0xff0f mask (start) vs 0xc0 bits (!start) are
 * undocumented here - confirm which polarity forces the link down
 * against the 5706 PHY expansion register spec.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1550
/* Central link re-evaluation: sample the PHY link status, resolve
 * speed/duplex/flow-control on link up (per chip/media type), restore
 * autoneg fallbacks on link down, report transitions, and reprogram
 * the MAC.  Loopback modes and remote-PHY devices short-circuit early.
 * Always returns 0.
 * NOTE(review): presumably called with bp->phy_lock held (the remote-
 * PHY path annotates that lock) - confirm against callers.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	/* Remember the previous state so we only report transitions. */
	link_up = bp->link_up;

	/* BMSR link status is latched; read twice (with the 5709 block
	 * address temporarily switched) to get the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		/* Shadow register: select AN_DBG, then double-read to
		 * clear latched bits.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* Override BMSR: trust the EMAC link indication combined
		 * with the autoneg sync state on 5706 SerDes.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: release any forced 2.5G mode and re-enable
		 * autoneg if we had dropped into parallel detect.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1634
/* Soft-reset the PHY through BMCR_RESET and poll for the self-clearing
 * reset bit (100 iterations x 10us, roughly 1ms).  Returns 0 once the
 * bit clears, -EBUSY on timeout.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			/* Small settle delay after reset completes. */
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
1658
1659static u32
1660bnx2_phy_get_pause_adv(struct bnx2 *bp)
1661{
1662        u32 adv = 0;
1663
1664        if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1665                (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1666
1667                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1668                        adv = ADVERTISE_1000XPAUSE;
1669                }
1670                else {
1671                        adv = ADVERTISE_PAUSE_CAP;
1672                }
1673        }
1674        else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1675                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1676                        adv = ADVERTISE_1000XPSE_ASYM;
1677                }
1678                else {
1679                        adv = ADVERTISE_PAUSE_ASYM;
1680                }
1681        }
1682        else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1683                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1684                        adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1685                }
1686                else {
1687                        adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1688                }
1689        }
1690        return adv;
1691}
1692
1693static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1694
/* Program the desired link settings into firmware for a remote-PHY
 * device: translate bp->autoneg/advertising (or the forced speed and
 * duplex) plus the pause advertisement into a BNX2_NETLINK_SET_LINK
 * argument word, write it to shared memory, and issue the SET_LINK
 * firmware command.  phy_lock is dropped around the firmware sync, as
 * annotated.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every mode enabled in
		 * bp->advertising.
		 */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Firmware sync may sleep/spin; release the phy_lock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1753
/* Set up the SerDes (fiber) PHY from bp->autoneg, bp->req_line_speed
 * and bp->req_duplex.  Covers both forced-speed and autonegotiation
 * modes, including the 2.5G capability on 5708/5709 parts.
 *
 * Called with bp->phy_lock held; the lock is dropped around the
 * msleep() that forces the link down so the partner sees the change.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed (remote) PHYs are configured via mailbox. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G capability requires a link bounce
		 * so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific programming of the forced 2.5G mode. */
		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner drops the link,
				 * then apply the forced settings.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiated speed path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1870
/* All fiber speeds this NIC can advertise; includes 2.5G only when the
 * PHY is 2.5G-capable.  Expands to an expression that reads "bp" from
 * the enclosing scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds this NIC can advertise. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 modes (plus CSMA). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII_CTRL1000 advertisement bits for 1000 Mbps. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1885
1886static void
1887bnx2_set_default_remote_link(struct bnx2 *bp)
1888{
1889        u32 link;
1890
1891        if (bp->phy_port == PORT_TP)
1892                link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1893        else
1894                link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1895
1896        if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1897                bp->req_line_speed = 0;
1898                bp->autoneg |= AUTONEG_SPEED;
1899                bp->advertising = ADVERTISED_Autoneg;
1900                if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1901                        bp->advertising |= ADVERTISED_10baseT_Half;
1902                if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1903                        bp->advertising |= ADVERTISED_10baseT_Full;
1904                if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1905                        bp->advertising |= ADVERTISED_100baseT_Half;
1906                if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1907                        bp->advertising |= ADVERTISED_100baseT_Full;
1908                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1909                        bp->advertising |= ADVERTISED_1000baseT_Full;
1910                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1911                        bp->advertising |= ADVERTISED_2500baseX_Full;
1912        } else {
1913                bp->autoneg = 0;
1914                bp->advertising = 0;
1915                bp->req_duplex = DUPLEX_FULL;
1916                if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1917                        bp->req_line_speed = SPEED_10;
1918                        if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1919                                bp->req_duplex = DUPLEX_HALF;
1920                }
1921                if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1922                        bp->req_line_speed = SPEED_100;
1923                        if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1924                                bp->req_duplex = DUPLEX_HALF;
1925                }
1926                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1927                        bp->req_line_speed = SPEED_1000;
1928                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1929                        bp->req_line_speed = SPEED_2500;
1930        }
1931}
1932
1933static void
1934bnx2_set_default_link(struct bnx2 *bp)
1935{
1936        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1937                bnx2_set_default_remote_link(bp);
1938                return;
1939        }
1940
1941        bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1942        bp->req_line_speed = 0;
1943        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1944                u32 reg;
1945
1946                bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1947
1948                reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1949                reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1950                if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1951                        bp->autoneg = 0;
1952                        bp->req_line_speed = bp->line_speed = SPEED_1000;
1953                        bp->req_duplex = DUPLEX_FULL;
1954                }
1955        } else
1956                bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1957}
1958
/* Bump the driver pulse sequence number and write it to the firmware
 * pulse mailbox so the bootcode knows the driver is alive.  The paired
 * register-window writes must not interleave with other indirect
 * accesses, hence bp->indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	/* Only the low sequence bits are written to the mailbox. */
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1972
/* Handle a link-status event from the firmware-managed (remote) PHY.
 * Decodes the BNX2_LINK_STATUS word from shared memory into
 * bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl and
 * bp->phy_port, then reprograms the MAC and reports any link change.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* The firmware piggybacks heartbeat requests on this event. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* HALF cases set the duplex and fall through to the
		 * matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Use the negotiated flow control only when both speed
		 * and flow-control autoneg are enabled; otherwise apply
		 * the requested (forced) setting on full duplex.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A media change (TP <-> fibre) invalidates the default
		 * link settings.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2053
2054static int
2055bnx2_set_remote_link(struct bnx2 *bp)
2056{
2057        u32 evt_code;
2058
2059        evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2060        switch (evt_code) {
2061                case BNX2_FW_EVT_CODE_LINK_EVENT:
2062                        bnx2_remote_phy_event(bp);
2063                        break;
2064                case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2065                default:
2066                        bnx2_send_heart_beat(bp);
2067                        break;
2068        }
2069        return 0;
2070}
2071
/* Set up the copper PHY from bp->autoneg / bp->req_* settings.  Called
 * with bp->phy_lock held; the lock is dropped around the msleep() used
 * to force the link down.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Rewrite the advertisement registers and restart
		 * autoneg only when something actually changed.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2163
2164static int
2165bnx2_setup_phy(struct bnx2 *bp, u8 port)
2166__releases(&bp->phy_lock)
2167__acquires(&bp->phy_lock)
2168{
2169        if (bp->loopback == MAC_LOOPBACK)
2170                return 0;
2171
2172        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2173                return bnx2_setup_serdes_phy(bp, port);
2174        }
2175        else {
2176                return bnx2_setup_copper_phy(bp);
2177        }
2178}
2179
/* Initialize the integrated 5709 SerDes PHY.  This PHY uses
 * block-addressed (paged) registers; the standard MII registers are
 * shifted by 0x10 into the combo IEEE block.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers live at +0x10 on this PHY; the >1G
	 * status and 2.5G advertisement use dedicated registers.
	 */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the board is 2.5G-capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Enable CL73 BAM autoneg with next pages after the base page. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE shadow block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2229
/* Initialize the external BCM5708S SerDes PHY: fiber mode with
 * auto-detect, PLL early link detect, optional 2.5G advertisement,
 * plus board-specific TX amplitude/control fixups from NVRAM.
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* The 2.5G advertisement register on this PHY. */
	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with signal auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the board supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply the NVRAM-provided TX control value on backplane boards. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2287
/* Initialize the BCM5706S SerDes PHY.  Adjusts the extended packet
 * length setting (via vendor shadow registers 0x18/0x1c) according to
 * whether a jumbo MTU is configured.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > ETH_DATA_LEN) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		/* Standard MTU: clear the extended packet length bit. */
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2325
/* Initialize the integrated copper PHY: apply the CRC and early-DAC
 * workarounds where flagged, set the extended packet length for jumbo
 * MTUs, and enable ethernet@wirespeed (plus auto-MDIX on the 5709).
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor-specified shadow/DSP write sequence for the
		 * CRC workaround.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 in DSP expansion register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > ETH_DATA_LEN) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* auto-mdix */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2383
2384
/* Initialize the PHY after (re)configuration.  Restores the default
 * MII register offsets (the 5709S init overrides them), enables link
 * attentions, reads the PHY ID, dispatches to the chip-specific init
 * routine, and finally applies the current link settings.  Remote
 * (firmware-managed) PHYs skip straight to the link setup.
 * Called with bp->phy_lock held.  Returns 0 on success or the error
 * from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	/* Enable link-change attentions from the EMAC. */
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2430
2431static int
2432bnx2_set_mac_loopback(struct bnx2 *bp)
2433{
2434        u32 mac_mode;
2435
2436        mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2437        mac_mode &= ~BNX2_EMAC_MODE_PORT;
2438        mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2439        BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2440        bp->link_up = 1;
2441        return 0;
2442}
2443
2444static int bnx2_test_link(struct bnx2 *);
2445
/* Put the PHY in 1G full-duplex loopback for the ethtool self-test.
 * Polls up to ~1 second for the link to come up, then programs the
 * EMAC for GMII with the MAC-level loopback/force bits cleared.
 * Returns 0 on success or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Wait up to 10 x 100 ms for the loopback link to come up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2475
/* Dump bootcode (MCP) CPU registers and firmware shared-memory state
 * to the kernel log.  Debug aid used when a firmware handshake times
 * out (see bnx2_fw_sync()).
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* The MCP state registers moved on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is read twice on purpose -- presumably to
	 * show whether the MCP is still advancing; confirm before
	 * "simplifying" this to a single read.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2518
/* Post a command to the bootcode through the driver mailbox and
 * (optionally) wait for it to be acknowledged.
 *
 * @msg_data: BNX2_DRV_MSG_* command; the driver sequence number is OR'ed
 *	in before posting.
 * @ack: when zero, post and return immediately; otherwise poll for the
 *	firmware acknowledgement.
 * @silent: suppress the error message and MCP state dump on timeout.
 *
 * Returns 0 on success, -EBUSY on ack timeout, or -EIO when the
 * firmware acknowledges with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number so the firmware's
	 * ack can be matched to this specific request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;
	bp->fw_last_msg = msg_data;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not report a pass/fail status. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2565
/* 5709-specific context setup: start the chip's context-memory
 * initialization, then zero each host-resident context page and program
 * its DMA address into the chip's host page table.
 *
 * Returns 0 on success, -EBUSY if the chip does not complete memory
 * init or a page-table write within the polling window, or -ENOMEM if a
 * context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block and kick off its internal memory
	 * initialization; the field at bits 16+ encodes the host page
	 * size relative to 256 bytes.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when the chip is done; poll briefly. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Each page must exist (allocated elsewhere) and be
		 * cleared before the chip starts using it.
		 */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit DMA address of page i into the host
		 * page table: low half + valid bit, then high half, then
		 * trigger the table write for entry i.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ self-clears once the entry is committed. */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2613
/* Zero all 96 on-chip connection contexts (pre-5709 chips; the 5709
 * uses host memory via bnx2_init_5709_context() instead).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 only: virtual CIDs with bit 3 set are
			 * remapped to a 0x60-based physical CID range —
			 * presumably an A0 silicon quirk; TODO confirm
			 * against the chip errata.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans CTX_SIZE bytes accessed through
		 * PHY_CTX_SIZE windows; map each window then clear it one
		 * 32-bit word at a time.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2656
/* Work around defective on-chip RX buffer memory: drain the free mbuf
 * pool, remember only the mbufs whose address lacks the bad-block bit,
 * and free just those back.  The bad blocks are thereby permanently
 * removed from circulation.
 *
 * Returns 0 on success or -ENOMEM if the scratch array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Scratch space for up to 512 good mbuf handles. */
	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
	if (!good_mbuf)
		return -ENOMEM;

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Build the value the FW_BUF_FREE register expects: the
		 * mbuf handle in both halves, with bit 0 set.
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2705
2706static void
2707bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2708{
2709        u32 val;
2710
2711        val = (mac_addr[0] << 8) | mac_addr[1];
2712
2713        BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2714
2715        val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2716                (mac_addr[4] << 8) | mac_addr[5];
2717
2718        BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2719}
2720
2721static inline int
2722bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2723{
2724        dma_addr_t mapping;
2725        struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2726        struct bnx2_rx_bd *rxbd =
2727                &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2728        struct page *page = alloc_page(gfp);
2729
2730        if (!page)
2731                return -ENOMEM;
2732        mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2733                               DMA_FROM_DEVICE);
2734        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2735                __free_page(page);
2736                return -EIO;
2737        }
2738
2739        rx_pg->page = page;
2740        dma_unmap_addr_set(rx_pg, mapping, mapping);
2741        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2742        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2743        return 0;
2744}
2745
2746static void
2747bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2748{
2749        struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2750        struct page *page = rx_pg->page;
2751
2752        if (!page)
2753                return;
2754
2755        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2756                       PAGE_SIZE, DMA_FROM_DEVICE);
2757
2758        __free_page(page);
2759        rx_pg->page = NULL;
2760}
2761
2762static inline int
2763bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2764{
2765        u8 *data;
2766        struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2767        dma_addr_t mapping;
2768        struct bnx2_rx_bd *rxbd =
2769                &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2770
2771        data = kmalloc(bp->rx_buf_size, gfp);
2772        if (!data)
2773                return -ENOMEM;
2774
2775        mapping = dma_map_single(&bp->pdev->dev,
2776                                 get_l2_fhdr(data),
2777                                 bp->rx_buf_use_size,
2778                                 DMA_FROM_DEVICE);
2779        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2780                kfree(data);
2781                return -EIO;
2782        }
2783
2784        rx_buf->data = data;
2785        dma_unmap_addr_set(rx_buf, mapping, mapping);
2786
2787        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2788        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2789
2790        rxr->rx_prod_bseq += bp->rx_buf_use_size;
2791
2792        return 0;
2793}
2794
2795static int
2796bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2797{
2798        struct status_block *sblk = bnapi->status_blk.msi;
2799        u32 new_link_state, old_link_state;
2800        int is_set = 1;
2801
2802        new_link_state = sblk->status_attn_bits & event;
2803        old_link_state = sblk->status_attn_bits_ack & event;
2804        if (new_link_state != old_link_state) {
2805                if (new_link_state)
2806                        BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2807                else
2808                        BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2809        } else
2810                is_set = 0;
2811
2812        return is_set;
2813}
2814
/* Service PHY/link attention events reported in the status block.
 * bnx2_phy_event_is_set() acknowledges each event as a side effect.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2828
2829static inline u16
2830bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2831{
2832        u16 cons;
2833
2834        cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2835
2836        if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2837                cons++;
2838        return cons;
2839}
2840
/* Reclaim completed TX descriptors on this NAPI instance's ring.
 *
 * Walks from the software consumer to the hardware consumer, unmapping
 * and freeing each completed skb, then updates BQL accounting and wakes
 * the queue if it was stopped and enough descriptors are free again.
 *
 * Returns the number of packets reclaimed (at most @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance; index by position in the array. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim the packet once its last BD has
			 * completed; account for the skipped link BD when
			 * the packet wraps past the end of a ring page.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap-around. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment BD that follows the head BD. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				DMA_TO_DEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons in case more completions arrived. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the TX lock to avoid racing with the xmit path. */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2935
/* Recycle @count RX page-ring entries from the consumer side back to
 * the producer side without freeing or reallocating the pages.  If
 * @skb is non-NULL, its last frag's page could not be replaced; that
 * page is returned to the ring and the skb is dropped.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last frag's page from the skb and put it
		 * back into the current consumer slot.
		 */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	/* Move each consumed page (and its DMA mapping and descriptor
	 * address) to the corresponding producer slot.
	 */
	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* Same slot needs no copying. */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2993
2994static inline void
2995bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2996                   u8 *data, u16 cons, u16 prod)
2997{
2998        struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2999        struct bnx2_rx_bd *cons_bd, *prod_bd;
3000
3001        cons_rx_buf = &rxr->rx_buf_ring[cons];
3002        prod_rx_buf = &rxr->rx_buf_ring[prod];
3003
3004        dma_sync_single_for_device(&bp->pdev->dev,
3005                dma_unmap_addr(cons_rx_buf, mapping),
3006                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE);
3007
3008        rxr->rx_prod_bseq += bp->rx_buf_use_size;
3009
3010        prod_rx_buf->data = data;
3011
3012        if (cons == prod)
3013                return;
3014
3015        dma_unmap_addr_set(prod_rx_buf, mapping,
3016                        dma_unmap_addr(cons_rx_buf, mapping));
3017
3018        cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3019        prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3020        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3021        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3022}
3023
/* Build an sk_buff for a received frame.
 *
 * @data: the raw RX buffer (header portion of the frame).
 * @len: frame length excluding the trailing 4 bytes.
 * @hdr_len: when non-zero, only @hdr_len bytes are in @data and the
 *	remainder of the frame lives in page-ring pages, which are
 *	attached as skb frags.
 * @ring_idx: packed (cons << 16) | prod ring indices.
 *
 * On any failure the buffers/pages are recycled into their rings and
 * NULL is returned.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Replace the consumed buffer before taking ownership of @data. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
/* NOTE: this label sits inside the error branch but is also reached by
 * goto from the build_skb() failure path below; both paths must recycle
 * any page-ring pages the frame would have used.
 */
error:
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 DMA_FROM_DEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Skip the l2_fhdr and padding so skb->data points at the frame. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		/* Linear frame: everything is already in @data. */
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* The +4 restores the trailing bytes the caller stripped
		 * from @len; they are trimmed again below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only the 4 trailing bytes remain — trim
				 * them from whatever was appended last
				 * (the linear area if no frags yet) and
				 * recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				/* Passing @skb lets the helper reclaim the
				 * frag page just attached, then free skb.
				 */
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, DMA_FROM_DEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3128
3129static inline u16
3130bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3131{
3132        u16 cons;
3133
3134        cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3135
3136        if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3137                cons++;
3138        return cons;
3139}
3140
/* Main RX processing loop for one NAPI instance.
 *
 * Consumes completed RX descriptors up to @budget packets: validates
 * each frame, builds an skb (copying small frames, attaching page frags
 * for jumbo/split frames), applies VLAN/checksum/RSS offload results,
 * and hands the skb to GRO.  Finally writes the updated producer
 * indices back to the hardware.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	if (budget <= 0)
		return rx_pkt;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Bring the header + copy-threshold region up to date for
		 * the CPU before reading the l2_fhdr fields.
		 */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			DMA_FROM_DEVICE);

		/* Prefetch the next entry's header while this one is
		 * processed.
		 */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* hdr_len != 0 means the rest of the frame is in the page
		 * ring: either the chip split the header out, or the frame
		 * exceeds the jumbo threshold.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling their buffers/pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the trailing 4 bytes (frame CRC). */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Small frame: copy into a fresh skb and recycle
			 * the original buffer.  The extra 6 bytes keep the
			 * copied payload's alignment.
			 */
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (!skb) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN-tagged
		 * (tagged frames are legitimately 4/8 bytes longer).
		 */
		if (len > (bp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(0x8100) &&
		    skb->protocol != htons(ETH_P_8021AD)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Propagate the hardware checksum verdict when RXCSUM is
		 * enabled and the chip classified the packet as TCP/UDP.
		 */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
				     PKT_HASH_TYPE_L3);

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer positions. */
	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	return rx_pkt;

}
3305
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts until NAPI polling re-enables them. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3328
/* MSI ISR for one-shot mode: unlike bnx2_msi(), no ack/mask register
 * write is performed before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3345
/* INTx (and shared) interrupt handler.  Detects whether this device
 * actually raised the line, acknowledges/masks the interrupt, and
 * schedules NAPI.  Returns IRQ_NONE when the interrupt was not ours.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask chip interrupts until NAPI re-enables them. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we saw only if we win the right to poll;
	 * bnx2_poll() uses it to ack the correct index later.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3384
3385static inline int
3386bnx2_has_fast_work(struct bnx2_napi *bnapi)
3387{
3388        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3389        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3390
3391        if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3392            (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3393                return 1;
3394        return 0;
3395}
3396
/* Attention bits we service: link-state change and firmware timer abort. */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return nonzero if anything needs servicing: fast-path ring work,
 * pending CNIC events, or unacknowledged attention bits.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	/* CNIC work is pending when its tag lags the status index. */
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* An attention event is pending when the raw bits differ from the
	 * acked bits for the events we care about.
	 */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3419
/* Periodic check for a missed MSI: if work is pending, MSI is enabled,
 * and the status index has not advanced since the previous idle check,
 * the interrupt was likely lost.  Recover by toggling MSI enable and
 * invoking the MSI handler directly.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* Status index unchanged since last check -> stalled. */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Pulse the MSI enable bit off and back on. */
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3441
#ifdef BCM_CNIC
/* Dispatch pending CNIC (offload) events to the registered cnic driver.
 * cnic_ops is RCU-protected; the handler returns the status index it
 * consumed up to, which we store as cnic_tag for bnx2_has_work().
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3458
/* Service pending attention events (link change / timer abort) from the
 * NAPI poll loop by running the PHY interrupt handler.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Raw vs. acked bits differing means an unserviced event. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);
	}
}
3478
/* Do one round of fast-path work for this vector: reap TX completions
 * first, then receive up to the remaining budget.  Returns the updated
 * work_done count (only RX packets count against the NAPI budget).
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3493
/* NAPI poll handler for MSI-X vectors.  Only fast-path (ring) work is
 * handled here; link and CNIC events are serviced by vector 0's
 * bnx2_poll().  Re-enables the interrupt by acking the status index
 * once all work is done within budget.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete_done(napi, work_done);
			/* Ack the index for this vector, re-enabling its IRQ. */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3520
/* NAPI poll handler for INTx/MSI (single-vector) operation.  Handles
 * link attention, fast-path rings, and CNIC events, then re-enables
 * interrupts by acking the last seen status index.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete_done(napi, work_done);
			/* MSI/MSI-X: a single ack write re-enables. */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with interrupts still masked,
			 * then ack again unmasked.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3569
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / VLAN tag keep) and the RPM
 * sort-user0 filters (broadcast, multicast hash, unicast match entries)
 * from the net_device's flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock also serializes writes to the RX mode registers. */
	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep the VLAN tag in the frame only when HW tag stripping is
	 * off and the chip/firmware combination allows it.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash registers. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address: low CRC byte selects one of 256 bits
		 * spread across the 8 32-bit hash registers.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More secondary unicast addresses than match slots: fall back
	 * to promiscuous sorting.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable sorting, program the new mode, then enable it. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3658
3659static int
3660check_fw_section(const struct firmware *fw,
3661                 const struct bnx2_fw_file_section *section,
3662                 u32 alignment, bool non_empty)
3663{
3664        u32 offset = be32_to_cpu(section->offset);
3665        u32 len = be32_to_cpu(section->len);
3666
3667        if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3668                return -EINVAL;
3669        if ((non_empty && len == 0) || len > fw->size - offset ||
3670            len & (alignment - 1))
3671                return -EINVAL;
3672        return 0;
3673}
3674
3675static int
3676check_mips_fw_entry(const struct firmware *fw,
3677                    const struct bnx2_mips_fw_file_entry *entry)
3678{
3679        if (check_fw_section(fw, &entry->text, 4, true) ||
3680            check_fw_section(fw, &entry->data, 4, false) ||
3681            check_fw_section(fw, &entry->rodata, 4, false))
3682                return -EINVAL;
3683        return 0;
3684}
3685
3686static void bnx2_release_firmware(struct bnx2 *bp)
3687{
3688        if (bp->rv2p_firmware) {
3689                release_firmware(bp->mips_firmware);
3690                release_firmware(bp->rv2p_firmware);
3691                bp->rv2p_firmware = NULL;
3692        }
3693}
3694
/* Request and sanity-check the MIPS and RV2P firmware images for this
 * chip revision.  On success both bp->mips_firmware and
 * bp->rv2p_firmware are held; on failure neither is (rv2p_firmware is
 * also NULLed so bnx2_request_firmware() will retry).
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the image set for the chip; 5709 A0/A1 need a special RV2P. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	/* Validate every section header before anything is programmed. */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3754
3755static int bnx2_request_firmware(struct bnx2 *bp)
3756{
3757        return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3758}
3759
3760static u32
3761rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3762{
3763        switch (idx) {
3764        case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3765                rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3766                rv2p_code |= RV2P_BD_PAGE_SIZE;
3767                break;
3768        }
3769        return rv2p_code;
3770}
3771
/* Load one RV2P processor (PROC1 or PROC2) with its firmware image:
 * write the 64-bit instructions pairwise through the INSTR_HIGH/LOW
 * registers, apply the fixup table, then reset the processor (it is
 * un-stalled later during chip init).  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own address/command register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write one 64-bit instruction (8 bytes) per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction at index i/8. */
		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	/* Apply the fixup table: each entry names a 32-bit word index to
	 * patch; the preceding word supplies the instruction's high half.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3831
3832static int
3833load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3834            const struct bnx2_mips_fw_file_entry *fw_entry)
3835{
3836        u32 addr, len, file_offset;
3837        __be32 *data;
3838        u32 offset;
3839        u32 val;
3840
3841        /* Halt the CPU. */
3842        val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3843        val |= cpu_reg->mode_value_halt;
3844        bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3845        bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3846
3847        /* Load the Text area. */
3848        addr = be32_to_cpu(fw_entry->text.addr);
3849        len = be32_to_cpu(fw_entry->text.len);
3850        file_offset = be32_to_cpu(fw_entry->text.offset);
3851        data = (__be32 *)(bp->mips_firmware->data + file_offset);
3852
3853        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3854        if (len) {
3855                int j;
3856
3857                for (j = 0; j < (len / 4); j++, offset += 4)
3858                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3859        }
3860
3861        /* Load the Data area. */
3862        addr = be32_to_cpu(fw_entry->data.addr);
3863        len = be32_to_cpu(fw_entry->data.len);
3864        file_offset = be32_to_cpu(fw_entry->data.offset);
3865        data = (__be32 *)(bp->mips_firmware->data + file_offset);
3866
3867        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3868        if (len) {
3869                int j;
3870
3871                for (j = 0; j < (len / 4); j++, offset += 4)
3872                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3873        }
3874
3875        /* Load the Read-Only area. */
3876        addr = be32_to_cpu(fw_entry->rodata.addr);
3877        len = be32_to_cpu(fw_entry->rodata.len);
3878        file_offset = be32_to_cpu(fw_entry->rodata.offset);
3879        data = (__be32 *)(bp->mips_firmware->data + file_offset);
3880
3881        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3882        if (len) {
3883                int j;
3884
3885                for (j = 0; j < (len / 4); j++, offset += 4)
3886                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3887        }
3888
3889        /* Clear the pre-fetch instruction. */
3890        bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3891
3892        val = be32_to_cpu(fw_entry->start_addr);
3893        bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3894
3895        /* Start the CPU. */
3896        val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3897        val &= ~cpu_reg->mode_value_halt;
3898        bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3899        bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3900
3901        return 0;
3902}
3903
/* Load firmware into all on-chip processors: the two RV2P engines and
 * the five MIPS CPUs (RX, TX, TX patch-up, completion, command).
 * Returns 0 on success or the first load_cpu_fw() error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
3943
/* Configure the chip for Wake-on-LAN before entering a low-power state.
 * With WoL enabled: renegotiate the link at 10/100 for copper ports,
 * enable magic/ACPI packet reception, accept broadcast and all
 * multicast, and keep EMAC/RPM powered.  In all cases, notify the
 * firmware of the suspend via bnx2_fw_sync() unless WoL is unsupported.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		/* Temporarily force 10/100 autoneg on copper for WoL,
		 * then restore the user's settings.
		 */
		autoneg = bp->autoneg;
		advertising = bp->advertising;

		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		/* Sort on broadcast and multicast; disable, program, enable. */
		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
		u32 val;

		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
			bnx2_fw_sync(bp, wol_msg, 1, 0);
			return;
		}
		/* Tell firmware not to power down the PHY yet, otherwise
		 * the chip will take a long time to respond to MMIO reads.
		 */
		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
		bnx2_fw_sync(bp, wol_msg, 1, 0);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
	}

}
4038
/* Transition the device between PCI power states.  Only D0 and D3hot
 * are supported; any other state returns -EINVAL.  D0 restores normal
 * EMAC/RPM operation; D3hot sets up WoL first and then cuts power.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Drop magic-packet mode; keep the received-event bits. */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		/* 5706 A0/A1: only enter D3hot when WoL is armed. */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;

		}
		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			u32 val;

			/* Tell firmware not to power down the PHY yet,
			 * otherwise the other port may not respond to
			 * MMIO reads.
			 */
			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			val &= ~BNX2_CONDITION_PM_STATE_MASK;
			val |= BNX2_CONDITION_PM_STATE_UNPREP;
			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
		}
		pci_set_power_state(bp->pdev, PCI_D3hot);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4094
4095static int
4096bnx2_acquire_nvram_lock(struct bnx2 *bp)
4097{
4098        u32 val;
4099        int j;
4100
4101        /* Request access to the flash interface. */
4102        BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4103        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4104                val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4105                if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4106                        break;
4107
4108                udelay(5);
4109        }
4110
4111        if (j >= NVRAM_TIMEOUT_COUNT)
4112                return -EBUSY;
4113
4114        return 0;
4115}
4116
/* Release the NVRAM hardware arbitration lock and wait (up to
 * NVRAM_TIMEOUT_COUNT polls of 5us) for the grant bit to clear.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4139
4140
4141static int
4142bnx2_enable_nvram_write(struct bnx2 *bp)
4143{
4144        u32 val;
4145
4146        val = BNX2_RD(bp, BNX2_MISC_CFG);
4147        BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4148
4149        if (bp->flash_info->flags & BNX2_NV_WREN) {
4150                int j;
4151
4152                BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4153                BNX2_WR(bp, BNX2_NVM_COMMAND,
4154                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4155
4156                for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4157                        udelay(5);
4158
4159                        val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4160                        if (val & BNX2_NVM_COMMAND_DONE)
4161                                break;
4162                }
4163
4164                if (j >= NVRAM_TIMEOUT_COUNT)
4165                        return -EBUSY;
4166        }
4167        return 0;
4168}
4169
4170static void
4171bnx2_disable_nvram_write(struct bnx2 *bp)
4172{
4173        u32 val;
4174
4175        val = BNX2_RD(bp, BNX2_MISC_CFG);
4176        BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4177}
4178
4179
4180static void
4181bnx2_enable_nvram_access(struct bnx2 *bp)
4182{
4183        u32 val;
4184
4185        val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4186        /* Enable both bits, even on read. */
4187        BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4188                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4189}
4190
4191static void
4192bnx2_disable_nvram_access(struct bnx2 *bp)
4193{
4194        u32 val;
4195
4196        val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4197        /* Disable both bits, even after read. */
4198        BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4199                val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4200                        BNX2_NVM_ACCESS_ENABLE_WR_EN));
4201}
4202
4203static int
4204bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4205{
4206        u32 cmd;
4207        int j;
4208
4209        if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4210                /* Buffered flash, no erase needed */
4211                return 0;
4212
4213        /* Build an erase command */
4214        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4215              BNX2_NVM_COMMAND_DOIT;
4216
4217        /* Need to clear DONE bit separately. */
4218        BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4219
4220        /* Address of the NVRAM to read from. */
4221        BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4222
4223        /* Issue an erase command. */
4224        BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4225
4226        /* Wait for completion. */
4227        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4228                u32 val;
4229
4230                udelay(5);
4231
4232                val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4233                if (val & BNX2_NVM_COMMAND_DONE)
4234                        break;
4235        }
4236
4237        if (j >= NVRAM_TIMEOUT_COUNT)
4238                return -EBUSY;
4239
4240        return 0;
4241}
4242
4243static int
4244bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4245{
4246        u32 cmd;
4247        int j;
4248
4249        /* Build the command word. */
4250        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4251
4252        /* Calculate an offset of a buffered flash, not needed for 5709. */
4253        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4254                offset = ((offset / bp->flash_info->page_size) <<
4255                           bp->flash_info->page_bits) +
4256                          (offset % bp->flash_info->page_size);
4257        }
4258
4259        /* Need to clear DONE bit separately. */
4260        BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4261
4262        /* Address of the NVRAM to read from. */
4263        BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4264
4265        /* Issue a read command. */
4266        BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4267
4268        /* Wait for completion. */
4269        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4270                u32 val;
4271
4272                udelay(5);
4273
4274                val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4275                if (val & BNX2_NVM_COMMAND_DONE) {
4276                        __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4277                        memcpy(ret_val, &v, 4);
4278                        break;
4279                }
4280        }
4281        if (j >= NVRAM_TIMEOUT_COUNT)
4282                return -EBUSY;
4283
4284        return 0;
4285}
4286
4287
4288static int
4289bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4290{
4291        u32 cmd;
4292        __be32 val32;
4293        int j;
4294
4295        /* Build the command word. */
4296        cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4297
4298        /* Calculate an offset of a buffered flash, not needed for 5709. */
4299        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4300                offset = ((offset / bp->flash_info->page_size) <<
4301                          bp->flash_info->page_bits) +
4302                         (offset % bp->flash_info->page_size);
4303        }
4304
4305        /* Need to clear DONE bit separately. */
4306        BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4307
4308        memcpy(&val32, val, 4);
4309
4310        /* Write the data. */
4311        BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4312
4313        /* Address of the NVRAM to write to. */
4314        BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4315
4316        /* Issue the write command. */
4317        BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4318
4319        /* Wait for completion. */
4320        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4321                udelay(5);
4322
4323                if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4324                        break;
4325        }
4326        if (j >= NVRAM_TIMEOUT_COUNT)
4327                return -EBUSY;
4328
4329        return 0;
4330}
4331
/* Identify the flash/EEPROM part behind the NVRAM interface and point
 * bp->flash_info at the matching flash_spec table entry (the 5709
 * always uses flash_5709).  If the interface has not yet been
 * reconfigured, program it from the matched entry.  Finally determine
 * bp->flash_size, preferring the size advertised in shared memory over
 * the table's total_size.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 of NVM_CFG1 indicates the interface was already
	 * reconfigured (presumably by the bootcode -- TODO confirm).
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop broke out: no table entry matched the straps. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Shared-memory config may override the table's flash size. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4414
4415static int
4416bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4417                int buf_size)
4418{
4419        int rc = 0;
4420        u32 cmd_flags, offset32, len32, extra;
4421
4422        if (buf_size == 0)
4423                return 0;
4424
4425        /* Request access to the flash interface. */
4426        if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4427                return rc;
4428
4429        /* Enable access to flash interface */
4430        bnx2_enable_nvram_access(bp);
4431
4432        len32 = buf_size;
4433        offset32 = offset;
4434        extra = 0;
4435
4436        cmd_flags = 0;
4437
4438        if (offset32 & 3) {
4439                u8 buf[4];
4440                u32 pre_len;
4441
4442                offset32 &= ~3;
4443                pre_len = 4 - (offset & 3);
4444
4445                if (pre_len >= len32) {
4446                        pre_len = len32;
4447                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
4448                                    BNX2_NVM_COMMAND_LAST;
4449                }
4450                else {
4451                        cmd_flags = BNX2_NVM_COMMAND_FIRST;
4452                }
4453
4454                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4455
4456                if (rc)
4457                        return rc;
4458
4459                memcpy(ret_buf, buf + (offset & 3), pre_len);
4460
4461                offset32 += 4;
4462                ret_buf += pre_len;
4463                len32 -= pre_len;
4464        }
4465        if (len32 & 3) {
4466                extra = 4 - (len32 & 3);
4467                len32 = (len32 + 4) & ~3;
4468        }
4469
4470        if (len32 == 4) {
4471                u8 buf[4];
4472
4473                if (cmd_flags)
4474                        cmd_flags = BNX2_NVM_COMMAND_LAST;
4475                else
4476                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
4477                                    BNX2_NVM_COMMAND_LAST;
4478
4479                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4480
4481                memcpy(ret_buf, buf, 4 - extra);
4482        }
4483        else if (len32 > 0) {
4484                u8 buf[4];
4485
4486                /* Read the first word. */
4487                if (cmd_flags)
4488                        cmd_flags = 0;
4489                else
4490                        cmd_flags = BNX2_NVM_COMMAND_FIRST;
4491
4492                rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4493
4494                /* Advance to the next dword. */
4495                offset32 += 4;
4496                ret_buf += 4;
4497                len32 -= 4;
4498
4499                while (len32 > 4 && rc == 0) {
4500                        rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4501
4502                        /* Advance to the next dword. */
4503                        offset32 += 4;
4504                        ret_buf += 4;
4505                        len32 -= 4;
4506                }
4507
4508                if (rc)
4509                        return rc;
4510
4511                cmd_flags = BNX2_NVM_COMMAND_LAST;
4512                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4513
4514                memcpy(ret_buf, buf, 4 - extra);
4515        }
4516
4517        /* Disable access to flash interface */
4518        bnx2_disable_nvram_access(bp);
4519
4520        bnx2_release_nvram_lock(bp);
4521
4522        return rc;
4523}
4524
/* Write buf_size bytes from data_buf to NVRAM at byte offset "offset".
 * Handles arbitrary (unaligned) offsets and lengths by first reading
 * back the partial dwords at either edge and merging them into an
 * aligned shadow buffer.  For non-buffered flash parts each affected
 * page is read in full, erased, and rewritten (read-modify-write);
 * buffered parts are written directly.  The NVRAM lock is acquired
 * and released once per page.  Returns 0 or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen down to a dword boundary and fetch the
	 * existing leading bytes so they can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen up to a dword boundary and fetch the
	 * existing trailing bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build a dword-aligned copy of the data with the preserved
	 * edge bytes merged in. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (!align_buf)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized bounce buffer for the
	 * read-erase-rewrite cycle.  264 bytes is assumed to cover the
	 * largest supported page_size -- verify against flash_table. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (!flash_buffer) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* Mark the last dword of the page (or, on buffered
			 * parts, of the data range) to close the command
			 * sequence. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4704
4705static void
4706bnx2_init_fw_cap(struct bnx2 *bp)
4707{
4708        u32 val, sig = 0;
4709
4710        bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4711        bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4712
4713        if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4714                bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4715
4716        val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4717        if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4718                return;
4719
4720        if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4721                bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4722                sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4723        }
4724
4725        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4726            (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4727                u32 link;
4728
4729                bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4730
4731                link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4732                if (link & BNX2_LINK_STATUS_SERDES_LINK)
4733                        bp->phy_port = PORT_FIBRE;
4734                else
4735                        bp->phy_port = PORT_TP;
4736
4737                sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4738                       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4739        }
4740
4741        if (netif_running(bp->dev) && sig)
4742                bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4743}
4744
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Switch the GRC windows into separate-window mode, then point
	 * windows 2 and 3 at the chip's MSI-X vector table and
	 * pending-bit array.  Called from bnx2_reset_chip() when the
	 * device is using MSI-X.
	 */
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4753
4754static void
4755bnx2_wait_dma_complete(struct bnx2 *bp)
4756{
4757        u32 val;
4758        int i;
4759
4760        /*
4761         * Wait for the current PCI transaction to complete before
4762         * issuing a reset.
4763         */
4764        if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4765            (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4766                BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4767                        BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4768                        BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4769                        BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4770                        BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4771                val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4772                udelay(5);
4773        } else {  /* 5709 */
4774                val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4775                val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4776                BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4777                val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4778
4779                for (i = 0; i < 100; i++) {
4780                        msleep(1);
4781                        val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4782                        if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4783                                break;
4784                }
4785        }
4786
4787        return;
4788}
4789
4790
/* Soft-reset the chip after handshaking with the bootcode firmware.
 * reset_code is the BNX2_DRV_MSG_* reason OR'ed into the WAIT0/WAIT1
 * firmware sync messages.  Re-applies post-reset fixups (firmware
 * capabilities, 5706 A0 workarounds, MSI-X table mapping).
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	bnx2_wait_dma_complete(bp);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 resets via MISC_COMMAND; the read-back flushes the
		 * posted write before the settle delay. */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Restore register-window access and mailbox word swap,
		 * which the reset cleared. */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* 5706/5708 reset via the CORE_RST_REQ bit in PCICFG,
		 * combined with window-enable and word-swap settings. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reprogram the default link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4894
4895static int
4896bnx2_init_chip(struct bnx2 *bp)
4897{
4898        u32 val, mtu;
4899        int rc, i;
4900
4901        /* Make sure the interrupt is not active. */
4902        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4903
4904        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4905              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4906#ifdef __BIG_ENDIAN
4907              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4908#endif
4909              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4910              DMA_READ_CHANS << 12 |
4911              DMA_WRITE_CHANS << 16;
4912
4913        val |= (0x2 << 20) | (1 << 11);
4914
4915        if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4916                val |= (1 << 23);
4917
4918        if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4919            (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4920            !(bp->flags & BNX2_FLAG_PCIX))
4921                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4922
4923        BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4924
4925        if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4926                val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4927                val |= BNX2_TDMA_CONFIG_ONE_DMA;
4928                BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4929        }
4930
4931        if (bp->flags & BNX2_FLAG_PCIX) {
4932                u16 val16;
4933
4934                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4935                                     &val16);
4936                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4937                                      val16 & ~PCI_X_CMD_ERO);
4938        }
4939
4940        BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4941                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4942                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4943                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4944
4945        /* Initialize context mapping and zero out the quick contexts.  The
4946         * context block must have already been enabled. */
4947        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4948                rc = bnx2_init_5709_context(bp);
4949                if (rc)
4950                        return rc;
4951        } else
4952                bnx2_init_context(bp);
4953
4954        if ((rc = bnx2_init_cpus(bp)) != 0)
4955                return rc;
4956
4957        bnx2_init_nvram(bp);
4958
4959        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4960
4961        val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4962        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4963        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4964        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4965                val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4966                if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4967                        val |= BNX2_MQ_CONFIG_HALT_DIS;
4968        }
4969
4970        BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4971
4972        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4973        BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4974        BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4975
4976        val = (BNX2_PAGE_BITS - 8) << 24;
4977        BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4978
4979        /* Configure page size. */
4980        val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4981        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4982        val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4983        BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4984
4985        val = bp->mac_addr[0] +
4986              (bp->mac_addr[1] << 8) +
4987              (bp->mac_addr[2] << 16) +
4988              bp->mac_addr[3] +
4989              (bp->mac_addr[4] << 8) +
4990              (bp->mac_addr[5] << 16);
4991        BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4992
4993        /* Program the MTU.  Also include 4 bytes for CRC32. */
4994        mtu = bp->dev->mtu;
4995        val = mtu + ETH_HLEN + ETH_FCS_LEN;
4996        if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
4997                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4998        BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4999
5000        if (mtu < ETH_DATA_LEN)
5001                mtu = ETH_DATA_LEN;
5002
5003        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5004        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5005        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5006
5007        memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5008        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5009                bp->bnx2_napi[i].last_status_idx = 0;
5010
5011        bp->idle_chk_status_idx = 0xffff;
5012
5013        /* Set up how to generate a link change interrupt. */
5014        BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5015
5016        BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5017                (u64) bp->status_blk_mapping & 0xffffffff);
5018        BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5019
5020        BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5021                (u64) bp->stats_blk_mapping & 0xffffffff);
5022        BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5023                (u64) bp->stats_blk_mapping >> 32);
5024
5025        BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5026                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5027
5028        BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5029                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5030
5031        BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5032                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5033
5034        BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5035
5036        BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5037
5038        BNX2_WR(bp, BNX2_HC_COM_TICKS,
5039                (bp->com_ticks_int << 16) | bp->com_ticks);
5040
5041        BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5042                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5043
5044        if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5045                BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5046        else
5047                BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5048        BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5049
5050        if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5051                val = BNX2_HC_CONFIG_COLLECT_STATS;
5052        else {
5053                val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5054                      BNX2_HC_CONFIG_COLLECT_STATS;
5055        }
5056
5057        if (bp->flags & BNX2_FLAG_USING_MSIX) {
5058                BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5059                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
5060
5061                val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5062        }
5063
5064        if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5065                val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5066
5067        BNX2_WR(bp, BNX2_HC_CONFIG, val);
5068
5069        if (bp->rx_ticks < 25)
5070                bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5071        else
5072                bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5073
5074        for (i = 1; i < bp->irq_nvecs; i++) {
5075                u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5076                           BNX2_HC_SB_CONFIG_1;
5077
5078                BNX2_WR(bp, base,
5079                        BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5080                        BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5081                        BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5082
5083                BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5084                        (bp->tx_quick_cons_trip_int << 16) |
5085                         bp->tx_quick_cons_trip);
5086
5087                BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5088                        (bp->tx_ticks_int << 16) | bp->tx_ticks);
5089
5090                BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5091                        (bp->rx_quick_cons_trip_int << 16) |
5092                        bp->rx_quick_cons_trip);
5093
5094                BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5095                        (bp->rx_ticks_int << 16) | bp->rx_ticks);
5096        }
5097
5098        /* Clear internal stats counters. */
5099        BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5100
5101        BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5102
5103        /* Initialize the receive filter. */
5104        bnx2_set_rx_mode(bp->dev);
5105
5106        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5107                val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5108                val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5109                BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5110        }
5111        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5112                          1, 0);
5113
5114        BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5115        BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5116
5117        udelay(20);
5118
5119        bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5120
5121        return rc;
5122}
5123
5124static void
5125bnx2_clear_ring_states(struct bnx2 *bp)
5126{
5127        struct bnx2_napi *bnapi;
5128        struct bnx2_tx_ring_info *txr;
5129        struct bnx2_rx_ring_info *rxr;
5130        int i;
5131
5132        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5133                bnapi = &bp->bnx2_napi[i];
5134                txr = &bnapi->tx_ring;
5135                rxr = &bnapi->rx_ring;
5136
5137                txr->tx_cons = 0;
5138                txr->hw_tx_cons = 0;
5139                rxr->rx_prod_bseq = 0;
5140                rxr->rx_prod = 0;
5141                rxr->rx_cons = 0;
5142                rxr->rx_pg_prod = 0;
5143                rxr->rx_pg_cons = 0;
5144        }
5145}
5146
5147static void
5148bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5149{
5150        u32 val, offset0, offset1, offset2, offset3;
5151        u32 cid_addr = GET_CID_ADDR(cid);
5152
5153        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5154                offset0 = BNX2_L2CTX_TYPE_XI;
5155                offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5156                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5157                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5158        } else {
5159                offset0 = BNX2_L2CTX_TYPE;
5160                offset1 = BNX2_L2CTX_CMD_TYPE;
5161                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5162                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5163        }
5164        val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5165        bnx2_ctx_wr(bp, cid_addr, offset0, val);
5166
5167        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5168        bnx2_ctx_wr(bp, cid_addr, offset1, val);
5169
5170        val = (u64) txr->tx_desc_mapping >> 32;
5171        bnx2_ctx_wr(bp, cid_addr, offset2, val);
5172
5173        val = (u64) txr->tx_desc_mapping & 0xffffffff;
5174        bnx2_ctx_wr(bp, cid_addr, offset3, val);
5175}
5176
5177static void
5178bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5179{
5180        struct bnx2_tx_bd *txbd;
5181        u32 cid = TX_CID;
5182        struct bnx2_napi *bnapi;
5183        struct bnx2_tx_ring_info *txr;
5184
5185        bnapi = &bp->bnx2_napi[ring_num];
5186        txr = &bnapi->tx_ring;
5187
5188        if (ring_num == 0)
5189                cid = TX_CID;
5190        else
5191                cid = TX_TSS_CID + ring_num - 1;
5192
5193        bp->tx_wake_thresh = bp->tx_ring_size / 2;
5194
5195        txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5196
5197        txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5198        txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5199
5200        txr->tx_prod = 0;
5201        txr->tx_prod_bseq = 0;
5202
5203        txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5204        txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5205
5206        bnx2_init_tx_context(bp, cid, txr);
5207}
5208
5209static void
5210bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5211                     u32 buf_size, int num_rings)
5212{
5213        int i;
5214        struct bnx2_rx_bd *rxbd;
5215
5216        for (i = 0; i < num_rings; i++) {
5217                int j;
5218
5219                rxbd = &rx_ring[i][0];
5220                for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5221                        rxbd->rx_bd_len = buf_size;
5222                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5223                }
5224                if (i == (num_rings - 1))
5225                        j = 0;
5226                else
5227                        j = i + 1;
5228                rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5229                rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5230        }
5231}
5232
/* Initialize RX ring @ring_num: chain its descriptor pages, program
 * the chip's RX context (including the optional jumbo page ring),
 * fill the rings with receive buffers, and publish the initial
 * producer indices through the ring's mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional (RSS) rings use
	 * consecutive CIDs starting at RX_RSS_CID.
	 */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Chain the descriptor pages and stamp length/flags. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring.  Overwritten below when jumbo page
	 * buffers are in use.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		/* First-buffer size in the high half, page size low. */
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Page-ring base address, high then low half. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Normal ring base address, high then low half. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the page ring; a partial fill is tolerated with a
	 * warning rather than treated as fatal.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Fill the normal ring, again best-effort. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for producer index/sequence updates. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the chip about the buffers just posted. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5318
/* (Re)initialize every TX and RX ring and, when multiple RX rings are
 * in use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are being set up. */
	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS: number of extra rings in bits 31:24, base CID
	 * shifted into bits 7+.
	 */
	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are being set up. */
	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the indirection table: 4 bits per entry, eight
		 * entries packed per 32-bit word; flush each word with
		 * a write command that encodes the word index (i >> 3).
		 * Entries rotate over the non-default rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Hash on all supported IPv4 and IPv6 traffic types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5365
5366static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5367{
5368        u32 max, num_rings = 1;
5369
5370        while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5371                ring_size -= BNX2_MAX_RX_DESC_CNT;
5372                num_rings++;
5373        }
5374        /* round to next power of 2 */
5375        max = max_size;
5376        while ((max & num_rings) == 0)
5377                max >>= 1;
5378
5379        if (num_rings != max)
5380                max <<= 1;
5381
5382        return max;
5383}
5384
5385static void
5386bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5387{
5388        u32 rx_size, rx_space, jumbo_size;
5389
5390        /* 8 for CRC and VLAN */
5391        rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5392
5393        rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5394                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5395
5396        bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5397        bp->rx_pg_ring_size = 0;
5398        bp->rx_max_pg_ring = 0;
5399        bp->rx_max_pg_ring_idx = 0;
5400        if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5401                int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5402
5403                jumbo_size = size * pages;
5404                if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5405                        jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5406
5407                bp->rx_pg_ring_size = jumbo_size;
5408                bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5409                                                        BNX2_MAX_RX_PG_RINGS);
5410                bp->rx_max_pg_ring_idx =
5411                        (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5412                rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5413                bp->rx_copy_thresh = 0;
5414        }
5415
5416        bp->rx_buf_use_size = rx_size;
5417        /* hw alignment + build_skb() overhead*/
5418        bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5419                NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5420        bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5421        bp->rx_ring_size = size;
5422        bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5423        bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5424}
5425
5426static void
5427bnx2_free_tx_skbs(struct bnx2 *bp)
5428{
5429        int i;
5430
5431        for (i = 0; i < bp->num_tx_rings; i++) {
5432                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5433                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5434                int j;
5435
5436                if (!txr->tx_buf_ring)
5437                        continue;
5438
5439                for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5440                        struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5441                        struct sk_buff *skb = tx_buf->skb;
5442                        int k, last;
5443
5444                        if (!skb) {
5445                                j = BNX2_NEXT_TX_BD(j);
5446                                continue;
5447                        }
5448
5449                        dma_unmap_single(&bp->pdev->dev,
5450                                         dma_unmap_addr(tx_buf, mapping),
5451                                         skb_headlen(skb),
5452                                         DMA_TO_DEVICE);
5453
5454                        tx_buf->skb = NULL;
5455
5456                        last = tx_buf->nr_frags;
5457                        j = BNX2_NEXT_TX_BD(j);
5458                        for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5459                                tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5460                                dma_unmap_page(&bp->pdev->dev,
5461                                        dma_unmap_addr(tx_buf, mapping),
5462                                        skb_frag_size(&skb_shinfo(skb)->frags[k]),
5463                                        DMA_TO_DEVICE);
5464                        }
5465                        dev_kfree_skb(skb);
5466                }
5467                netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5468        }
5469}
5470
5471static void
5472bnx2_free_rx_skbs(struct bnx2 *bp)
5473{
5474        int i;
5475
5476        for (i = 0; i < bp->num_rx_rings; i++) {
5477                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5478                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5479                int j;
5480
5481                if (!rxr->rx_buf_ring)
5482                        return;
5483
5484                for (j = 0; j < bp->rx_max_ring_idx; j++) {
5485                        struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5486                        u8 *data = rx_buf->data;
5487
5488                        if (!data)
5489                                continue;
5490
5491                        dma_unmap_single(&bp->pdev->dev,
5492                                         dma_unmap_addr(rx_buf, mapping),
5493                                         bp->rx_buf_use_size,
5494                                         DMA_FROM_DEVICE);
5495
5496                        rx_buf->data = NULL;
5497
5498                        kfree(data);
5499                }
5500                for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5501                        bnx2_free_rx_page(bp, rxr, j);
5502        }
5503}
5504
/* Release every TX and RX buffer currently held by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5511
5512static int
5513bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5514{
5515        int rc;
5516
5517        rc = bnx2_reset_chip(bp, reset_code);
5518        bnx2_free_skbs(bp);
5519        if (rc)
5520                return rc;
5521
5522        if ((rc = bnx2_init_chip(bp)) != 0)
5523                return rc;
5524
5525        bnx2_init_all_rings(bp);
5526        return 0;
5527}
5528
5529static int
5530bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5531{
5532        int rc;
5533
5534        if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5535                return rc;
5536
5537        spin_lock_bh(&bp->phy_lock);
5538        bnx2_init_phy(bp, reset_phy);
5539        bnx2_set_link(bp);
5540        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5541                bnx2_remote_phy_event(bp);
5542        spin_unlock_bh(&bp->phy_lock);
5543        return 0;
5544}
5545
5546static int
5547bnx2_shutdown_chip(struct bnx2 *bp)
5548{
5549        u32 reset_code;
5550
5551        if (bp->flags & BNX2_FLAG_NO_WOL)
5552                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5553        else if (bp->wol)
5554                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5555        else
5556                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5557
5558        return bnx2_reset_chip(bp, reset_code);
5559}
5560
/* Self-test: walk reg_tbl and verify each register's read/write bits
 * really are writable (with all-zeros and all-ones patterns) and that
 * its read-only bits are unaffected by writes.  The original register
 * value is restored in all cases.
 *
 * Returns 0 on success or -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table columns: register offset, flags (BNX2_FL_NOT_5709 to
	 * skip on the 5709), mask of writable bits, mask of read-only
	 * bits.  Terminated by offset 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: RW bits must read back 0, RO bits
		 * must keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: RW bits must read back as set, RO
		 * bits again unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original value and move on. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5731
5732static int
5733bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5734{
5735        static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5736                0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5737        int i;
5738
5739        for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5740                u32 offset;
5741
5742                for (offset = 0; offset < size; offset += 4) {
5743
5744                        bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5745
5746                        if (bnx2_reg_rd_ind(bp, start + offset) !=
5747                                test_pattern[i]) {
5748                                return -ENODEV;
5749                        }
5750                }
5751        }
5752        return 0;
5753}
5754
5755static int
5756bnx2_test_memory(struct bnx2 *bp)
5757{
5758        int ret = 0;
5759        int i;
5760        static struct mem_entry {
5761                u32   offset;
5762                u32   len;
5763        } mem_tbl_5706[] = {
5764                { 0x60000,  0x4000 },
5765                { 0xa0000,  0x3000 },
5766                { 0xe0000,  0x4000 },
5767                { 0x120000, 0x4000 },
5768                { 0x1a0000, 0x4000 },
5769                { 0x160000, 0x4000 },
5770                { 0xffffffff, 0    },
5771        },
5772        mem_tbl_5709[] = {
5773                { 0x60000,  0x4000 },
5774                { 0xa0000,  0x3000 },
5775                { 0xe0000,  0x4000 },
5776                { 0x120000, 0x4000 },
5777                { 0x1a0000, 0x4000 },
5778                { 0xffffffff, 0    },
5779        };
5780        struct mem_entry *mem_tbl;
5781
5782        if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5783                mem_tbl = mem_tbl_5709;
5784        else
5785                mem_tbl = mem_tbl_5706;
5786
5787        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5788                if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5789                        mem_tbl[i].len)) != 0) {
5790                        return ret;
5791                }
5792        }
5793
5794        return ret;
5795}
5796
5797#define BNX2_MAC_LOOPBACK       0
5798#define BNX2_PHY_LOOPBACK       1
5799
/* Self-test helper: transmit a single packet with the chip in MAC or
 * PHY loopback and verify that it comes back on the rx ring intact.
 *
 * Returns 0 on success, -EINVAL for an unknown loopback_mode, -ENOMEM
 * or -EIO on skb allocation / DMA mapping failure, and -ENODEV when
 * the packet is not looped back correctly.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;

	/* Both tx and rx use ring 0 for the loopback test. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback cannot be run when the PHY is managed
		 * remotely; report success so the test is skipped.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Largest frame that still fits a single non-jumbo rx buffer. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination MAC = our own address, zeroed src/type area, then
	 * a recognizable (i & 0xff) byte pattern as payload.
	 */
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status block update (without an interrupt) and record
	 * the current rx consumer index so the looped-back packet can
	 * be detected below.
	 */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build a single tx descriptor covering the whole packet. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell. */
	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the chip time to loop the packet back. */
	udelay(100);

	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The packet must have been fully transmitted ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts packets must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, DMA_FROM_DEVICE);

	/* Reject the packet if the chip flagged any rx error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* The hardware length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5932
5933#define BNX2_MAC_LOOPBACK_FAILED        1
5934#define BNX2_PHY_LOOPBACK_FAILED        2
5935#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5936                                         BNX2_PHY_LOOPBACK_FAILED)
5937
5938static int
5939bnx2_test_loopback(struct bnx2 *bp)
5940{
5941        int rc = 0;
5942
5943        if (!netif_running(bp->dev))
5944                return BNX2_LOOPBACK_FAILED;
5945
5946        bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5947        spin_lock_bh(&bp->phy_lock);
5948        bnx2_init_phy(bp, 1);
5949        spin_unlock_bh(&bp->phy_lock);
5950        if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5951                rc |= BNX2_MAC_LOOPBACK_FAILED;
5952        if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5953                rc |= BNX2_PHY_LOOPBACK_FAILED;
5954        return rc;
5955}
5956
5957#define NVRAM_SIZE 0x200
5958#define CRC32_RESIDUAL 0xdebb20e3
5959
5960static int
5961bnx2_test_nvram(struct bnx2 *bp)
5962{
5963        __be32 buf[NVRAM_SIZE / 4];
5964        u8 *data = (u8 *) buf;
5965        int rc = 0;
5966        u32 magic, csum;
5967
5968        if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5969                goto test_nvram_done;
5970
5971        magic = be32_to_cpu(buf[0]);
5972        if (magic != 0x669955aa) {
5973                rc = -ENODEV;
5974                goto test_nvram_done;
5975        }
5976
5977        if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5978                goto test_nvram_done;
5979
5980        csum = ether_crc_le(0x100, data);
5981        if (csum != CRC32_RESIDUAL) {
5982                rc = -ENODEV;
5983                goto test_nvram_done;
5984        }
5985
5986        csum = ether_crc_le(0x100, data + 0x100);
5987        if (csum != CRC32_RESIDUAL) {
5988                rc = -ENODEV;
5989        }
5990
5991test_nvram_done:
5992        return rc;
5993}
5994
5995static int
5996bnx2_test_link(struct bnx2 *bp)
5997{
5998        u32 bmsr;
5999
6000        if (!netif_running(bp->dev))
6001                return -ENODEV;
6002
6003        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6004                if (bp->link_up)
6005                        return 0;
6006                return -ENODEV;
6007        }
6008        spin_lock_bh(&bp->phy_lock);
6009        bnx2_enable_bmsr1(bp);
6010        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6011        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6012        bnx2_disable_bmsr1(bp);
6013        spin_unlock_bh(&bp->phy_lock);
6014
6015        if (bmsr & BMSR_LSTATUS) {
6016                return 0;
6017        }
6018        return -ENODEV;
6019}
6020
6021static int
6022bnx2_test_intr(struct bnx2 *bp)
6023{
6024        int i;
6025        u16 status_idx;
6026
6027        if (!netif_running(bp->dev))
6028                return -ENODEV;
6029
6030        status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6031
6032        /* This register is not touched during run-time. */
6033        BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6034        BNX2_RD(bp, BNX2_HC_COMMAND);
6035
6036        for (i = 0; i < 10; i++) {
6037                if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6038                        status_idx) {
6039
6040                        break;
6041                }
6042
6043                msleep_interruptible(10);
6044        }
6045        if (i < 10)
6046                return 0;
6047
6048        return -ENODEV;
6049}
6050
/* Determining link for parallel detection (5706 SerDes).
 *
 * Returns 1 when the link partner appears to be a non-autoneg device
 * that we can link to via parallel detection, 0 otherwise.  Caller
 * holds bp->phy_lock (called from bnx2_5706_serdes_timer()).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No signal detected -> no link possible. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice back-to-back; only the second value is used. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Read twice back-to-back; only the second value is used. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6082
/* Periodic SerDes maintenance for the 5706: performs parallel
 * detection when autoneg fails, reverts to autoneg when the partner
 * starts negotiating, and forces the link down on loss of sync.
 * Runs from bnx2_timer(); takes bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still in flight; count down and
		 * skip the link check this tick.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not produced a link; if the
			 * partner looks like a forced-speed device,
			 * drop autoneg and force 1G full duplex
			 * (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detection; if the partner
		 * is now sending autoneg pages (bit 0x20 of shadow reg
		 * 0x15 after selecting 0x0f01), re-enable autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Read twice back-to-back; only the second value is used. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link reported up but PHY has lost sync: force
			 * the link down once, then let bnx2_set_link()
			 * re-evaluate on subsequent ticks.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6144
/* Periodic SerDes maintenance for the 5708: when autoneg is not
 * producing a link, alternate between forced 2.5G mode and autoneg.
 * Runs from bnx2_timer(); takes bp->phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Remote-managed PHY: nothing to do locally. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* An autoneg attempt is still in flight; count down. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg failed so far: try forced 2.5G with a
			 * shorter re-check interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G failed too: go back to autoneg
			 * and give it two timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6177
/* Periodic driver timer (fires every bp->current_interval jiffies):
 * checks for missed MSIs, sends the firmware heartbeat, refreshes the
 * firmware rx-drop counter, works around broken statistics blocks,
 * and runs the per-chip SerDes state machines.
 */
static void
bnx2_timer(struct timer_list *t)
{
	struct bnx2 *bp = from_timer(bp, t, timer);

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are disabled (e.g. during reset); just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Only plain MSI (not one-shot) needs the missed-MSI check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6213
6214static int
6215bnx2_request_irq(struct bnx2 *bp)
6216{
6217        unsigned long flags;
6218        struct bnx2_irq *irq;
6219        int rc = 0, i;
6220
6221        if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6222                flags = 0;
6223        else
6224                flags = IRQF_SHARED;
6225
6226        for (i = 0; i < bp->irq_nvecs; i++) {
6227                irq = &bp->irq_tbl[i];
6228                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6229                                 &bp->bnx2_napi[i]);
6230                if (rc)
6231                        break;
6232                irq->requested = 1;
6233        }
6234        return rc;
6235}
6236
6237static void
6238__bnx2_free_irq(struct bnx2 *bp)
6239{
6240        struct bnx2_irq *irq;
6241        int i;
6242
6243        for (i = 0; i < bp->irq_nvecs; i++) {
6244                irq = &bp->irq_tbl[i];
6245                if (irq->requested)
6246                        free_irq(irq->vector, &bp->bnx2_napi[i]);
6247                irq->requested = 0;
6248        }
6249}
6250
6251static void
6252bnx2_free_irq(struct bnx2 *bp)
6253{
6254
6255        __bnx2_free_irq(bp);
6256        if (bp->flags & BNX2_FLAG_USING_MSI)
6257                pci_disable_msi(bp->pdev);
6258        else if (bp->flags & BNX2_FLAG_USING_MSIX)
6259                pci_disable_msix(bp->pdev);
6260
6261        bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6262}
6263
/* Try to switch the device to MSI-X with up to msix_vecs vectors
 * (plus one extra for CNIC when enabled).  On success this sets
 * BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI and fills in
 * bp->irq_tbl; on failure nothing is changed, so the caller falls
 * back to MSI or INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Program the MSI-X table/PBA windows in the chip. */
	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	/* Reserve one additional vector for the CNIC driver. */
	total_vecs++;
#endif
	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
					   BNX2_MIN_MSIX_VEC, total_vecs);
	if (total_vecs < 0)
		return;

	/* We may have been granted fewer vectors than requested. */
	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6307
/* Select the interrupt mode (MSI-X, MSI, or legacy INTx) and size the
 * tx/rx ring counts to the number of vectors actually obtained.
 * @dis_msi forces legacy INTx.  Returns 0 or the error from
 * netif_set_real_num_rx_queues().
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	/* Derive a vector count from the user-requested ring counts,
	 * defaulting to CPU-based values when none were requested.
	 */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* Start from the legacy INTx configuration; it is overridden
	 * below if MSI or MSI-X can be enabled.
	 */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI if MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				/* 5709 gets the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Clamp ring counts to the vectors actually available. */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6359
/* Called with rtnl_lock.
 *
 * ndo_open handler: loads firmware, sets up interrupts/NAPI/memory,
 * brings up the NIC, and verifies MSI delivery (falling back to INTx
 * if the MSI test fails).  On any error, everything acquired so far
 * is torn down via the open_err path.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			/* Tear down MSI and redo the interrupt setup
			 * and NIC init with legacy INTx forced on.
			 */
			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind everything acquired above; the free/disable helpers
	 * are safe to call even for resources not yet acquired.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6442
/* Deferred reset handler (scheduled from bnx2_tx_timeout()): stops
 * the netif, restores PCI config space if the PCI block was reset,
 * re-initializes the NIC, and restarts traffic.  Runs in process
 * context under rtnl_lock.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	/* The device may have been closed between scheduling and now. */
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* dev_close() expects NAPI enabled. */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6477
6478#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6479
/* Dump the flow-through queue (FTQ) control registers, on-chip CPU
 * states, and the TBDC CAM to the kernel log for tx-timeout
 * post-mortem analysis.  All output goes through netdev_err().
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ appears twice, so its FTQ_CTL is
		 * dumped twice -- presumably intentional; confirm.
		 */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	/* Dump each on-chip CPU's control block; reg + 0x1c (pc) is
	 * read twice and printed twice in one line.
	 */
	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	/* Walk all 0x20 TBDC CAM lines, issuing a CAM read for each
	 * and polling (bounded) for the command to complete.
	 */
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6546
/* Dump PCI config and key MAC/host-coalescing registers to the kernel
 * log for tx-timeout diagnosis.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* The MSI-X pending-bit array lives behind GRC window 3. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6570
6571static void
6572bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
6573{
6574        struct bnx2 *bp = netdev_priv(dev);
6575
6576        bnx2_dump_ftq(bp);
6577        bnx2_dump_state(bp);
6578        bnx2_dump_mcp_state(bp);
6579
6580        /* This allows the netif to be shutdown gracefully before resetting */
6581        schedule_work(&bp->reset_task);
6582}
6583
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * ndo_start_xmit: map the skb (head + frags) for DMA, build one tx
 * descriptor per piece with checksum/VLAN/LSO flags, then ring the
 * doorbell.  On a frag mapping failure every mapping made so far is
 * unwound and the skb is dropped (NETDEV_TX_OK).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Should not happen: the queue is stopped before it can fill. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
	}

	/* LSO: encode TCP/IP option lengths (and for IPv6 the transport
	 * header offset) into the BD flags and mss field.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Spread the 8-byte-unit offset across
				 * the OFF0/OFF2/OFF4 bit fields.
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD covers the linear head of the skb. */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	/* Sync BD data before updating TX mailbox */
	wmb();

	netdev_tx_sent_queue(txq, skb->len);

	prod = BNX2_NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the tx doorbell. */
	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
6760
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce in dependency order: disable interrupts and NAPI
	 * polling first, then TX and the maintenance timer, before
	 * resetting the chip and releasing IRQs, buffers and memory.
	 */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	/* Mark link down so ethtool/stack see a consistent closed state. */
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}
6780
6781static void
6782bnx2_save_stats(struct bnx2 *bp)
6783{
6784        u32 *hw_stats = (u32 *) bp->stats_blk;
6785        u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6786        int i;
6787
6788        /* The 1st 10 counters are 64-bit counters */
6789        for (i = 0; i < 20; i += 2) {
6790                u32 hi;
6791                u64 lo;
6792
6793                hi = temp_stats[i] + hw_stats[i];
6794                lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6795                if (lo > 0xffffffff)
6796                        hi++;
6797                temp_stats[i] = hi;
6798                temp_stats[i + 1] = lo & 0xffffffff;
6799        }
6800
6801        for ( ; i < sizeof(struct statistics_block) / 4; i++)
6802                temp_stats[i] += hw_stats[i];
6803}
6804
/* Combine the {hi, lo} u32 halves of a 64-bit hardware counter. */
#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* 64-bit counter: live hardware value plus the total saved across the
 * last chip reset (see bnx2_save_stats()).  Expects "bp" in scope.
 */
#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* 32-bit counter: live hardware value plus the saved pre-reset total. */
#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
6815
/* ndo_get_stats64 handler: fill @net_stats from the hardware statistics
 * block plus the totals saved across the last chip reset.  The
 * GET_*BIT_NET_STATS macros implicitly read both bp->stats_blk and
 * bp->temp_stats_blk.
 */
static void
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* No statistics block allocated; leave the counters untouched. */
	if (!bp->stats_blk)
		return;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	/* rx_errors is the aggregate of the four categories above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* 5706 and 5708 A0 report no carrier-sense error count —
	 * presumably a chip erratum (see the skip note on the 5706
	 * stats length table); report 0 for those chips.
	 */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

}
6887
6888/* All ethtool functions called with rtnl_lock */
6889
/* ethtool get_link_ksettings handler.  Builds the supported/advertised
 * mode masks from whether the PHY is serdes (fibre), copper, or a
 * remote PHY that can be either, then samples the live link state
 * under phy_lock.
 */
static int
bnx2_get_link_ksettings(struct net_device *dev,
			struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;
	u32 supported, advertising;

	supported = SUPPORTED_Autoneg;
	/* A remote-capable PHY may present either medium. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	/* phy_lock protects the link state fields sampled below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->base.port = bp->phy_port;
	advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else {
		cmd->base.autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex (and MDI-X on copper) are only meaningful while
	 * the link is up; otherwise report them as unknown.
	 */
	if (netif_carrier_ok(dev)) {
		cmd->base.speed = bp->line_speed;
		cmd->base.duplex = bp->duplex;
		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	}
	else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->base.phy_address = bp->phy_addr;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
6957
/* ethtool set_link_ksettings handler.  Validates the requested port,
 * autoneg and speed/duplex combination against the PHY's capabilities,
 * stores the request in *bp, and applies it immediately when the
 * device is running.  Returns -EINVAL on any invalid combination.
 */
static int
bnx2_set_link_ksettings(struct net_device *dev,
			const struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; commit to *bp only after validation. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible with a remote-capable PHY. */
	if (cmd->base.port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);

		/* Restrict the advertisement to the chosen medium; if
		 * nothing usable remains, advertise every speed for it.
		 */
		if (cmd->base.port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = cmd->base.speed;

		/* Forced speed: fibre allows only 1G/2.5G full duplex
		 * (2.5G only on capable PHYs); copper rejects forced
		 * 1G/2.5G entirely.
		 */
		if (cmd->base.port == PORT_FIBRE) {
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->base.duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->base.duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->base.port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
7039
7040static void
7041bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7042{
7043        struct bnx2 *bp = netdev_priv(dev);
7044
7045        strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7046        strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7047        strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7048}
7049
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len handler: fixed-size dump buffer. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
7057
/* ethtool get_regs handler (ethtool -d): dump device registers into
 * @_p, a buffer of BNX2_REGDUMP_LEN bytes.  Only the offset ranges in
 * reg_boundaries[] are read; the gaps between ranges stay zeroed.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* [start, end) pairs of readable register ranges, in ascending
	 * order; the final start 0x8000 equals BNX2_REGDUMP_LEN and
	 * terminates the walk below.
	 */
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	/* Zero the whole buffer so unread gaps read back as zero. */
	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Only touch the registers while the interface is running. */
	if (!netif_running(bp->dev))
		return;

	/* Walk each [start, end) range, reading one u32 per 4 bytes and
	 * writing it at the matching offset in the output buffer.
	 */
	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			/* End of this range: jump to the next start. */
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
7109
7110static void
7111bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7112{
7113        struct bnx2 *bp = netdev_priv(dev);
7114
7115        if (bp->flags & BNX2_FLAG_NO_WOL) {
7116                wol->supported = 0;
7117                wol->wolopts = 0;
7118        }
7119        else {
7120                wol->supported = WAKE_MAGIC;
7121                if (bp->wol)
7122                        wol->wolopts = WAKE_MAGIC;
7123                else
7124                        wol->wolopts = 0;
7125        }
7126        memset(&wol->sopass, 0, sizeof(wol->sopass));
7127}
7128
7129static int
7130bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7131{
7132        struct bnx2 *bp = netdev_priv(dev);
7133
7134        if (wol->wolopts & ~WAKE_MAGIC)
7135                return -EINVAL;
7136
7137        if (wol->wolopts & WAKE_MAGIC) {
7138                if (bp->flags & BNX2_FLAG_NO_WOL)
7139                        return -EINVAL;
7140
7141                bp->wol = 1;
7142        }
7143        else {
7144                bp->wol = 0;
7145        }
7146
7147        device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7148
7149        return 0;
7150}
7151
/* ethtool nway_reset handler: restart link autonegotiation.  Returns
 * -EAGAIN if the interface is down and -EINVAL if autonegotiation is
 * not currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: delegate the restart to the remote-PHY setup path. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; reacquire before touching
		 * the PHY again.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout serviced by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation on the PHY. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7197
/* ethtool get_link handler: report the driver's cached link state. */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
7205
7206static int
7207bnx2_get_eeprom_len(struct net_device *dev)
7208{
7209        struct bnx2 *bp = netdev_priv(dev);
7210
7211        if (!bp->flash_info)
7212                return 0;
7213
7214        return (int) bp->flash_size;
7215}
7216
7217static int
7218bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7219                u8 *eebuf)
7220{
7221        struct bnx2 *bp = netdev_priv(dev);
7222        int rc;
7223
7224        /* parameters already validated in ethtool_get_eeprom */
7225
7226        rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7227
7228        return rc;
7229}
7230
7231static int
7232bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7233                u8 *eebuf)
7234{
7235        struct bnx2 *bp = netdev_priv(dev);
7236        int rc;
7237
7238        /* parameters already validated in ethtool_set_eeprom */
7239
7240        rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7241
7242        return rc;
7243}
7244
/* ethtool get_coalesce handler: report the interrupt coalescing
 * parameters currently cached in *bp.
 */
static int bnx2_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero fields this driver does not support. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
7268
7269static int bnx2_set_coalesce(struct net_device *dev,
7270                             struct ethtool_coalesce *coal,
7271                             struct kernel_ethtool_coalesce *kernel_coal,
7272                             struct netlink_ext_ack *extack)
7273{
7274        struct bnx2 *bp = netdev_priv(dev);
7275
7276        bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7277        if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7278
7279        bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7280        if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7281
7282        bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7283        if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7284
7285        bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7286        if (bp->rx_quick_cons_trip_int > 0xff)
7287                bp->rx_quick_cons_trip_int = 0xff;
7288
7289        bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7290        if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7291
7292        bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7293        if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7294
7295        bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7296        if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7297
7298        bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7299        if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7300                0xff;
7301
7302        bp->stats_ticks = coal->stats_block_coalesce_usecs;
7303        if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7304                if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7305                        bp->stats_ticks = USEC_PER_SEC;
7306        }
7307        if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7308                bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7309        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7310
7311        if (netif_running(bp->dev)) {
7312                bnx2_netif_stop(bp, true);
7313                bnx2_init_nic(bp, 0);
7314                bnx2_netif_start(bp, true);
7315        }
7316
7317        return 0;
7318}
7319
/* ethtool get_ringparam handler: report current and maximum RX, RX
 * jumbo (page) and TX ring sizes.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
7334
/* Change the RX/TX ring sizes.  If the interface is running, the chip
 * is reset and fully re-initialized with the new sizes; @reset_irq also
 * tears down and re-creates the IRQ/NAPI setup.  On re-init failure the
 * device is closed and the error returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		/* Quiesce and release everything tied to the old sizes. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		/* Rebuild: interrupts/NAPI (if torn down), memory, IRQ,
		 * then NIC init; stop at the first failure.
		 */
		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Re-enable NAPI (disabled by bnx2_netif_stop()
			 * above) before closing the device — presumably
			 * so dev_close() finds a consistent state.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7390
7391static int
7392bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7393{
7394        struct bnx2 *bp = netdev_priv(dev);
7395        int rc;
7396
7397        if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7398                (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7399                (ering->tx_pending <= MAX_SKB_FRAGS)) {
7400
7401                return -EINVAL;
7402        }
7403        rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7404                                   false);
7405        return rc;
7406}
7407
7408static void
7409bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7410{
7411        struct bnx2 *bp = netdev_priv(dev);
7412
7413        epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7414        epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7415        epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7416}
7417
7418static int
7419bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7420{
7421        struct bnx2 *bp = netdev_priv(dev);
7422
7423        bp->req_flow_ctrl = 0;
7424        if (epause->rx_pause)
7425                bp->req_flow_ctrl |= FLOW_CTRL_RX;
7426        if (epause->tx_pause)
7427                bp->req_flow_ctrl |= FLOW_CTRL_TX;
7428
7429        if (epause->autoneg) {
7430                bp->autoneg |= AUTONEG_FLOW_CTRL;
7431        }
7432        else {
7433                bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7434        }
7435
7436        if (netif_running(dev)) {
7437                spin_lock_bh(&bp->phy_lock);
7438                bnx2_setup_phy(bp, bp->phy_port);
7439                spin_unlock_bh(&bp->phy_lock);
7440        }
7441
7442        return 0;
7443}
7444
/* Names of the ethtool statistics (ethtool -S).  The order must match
 * bnx2_stats_offset_arr and the per-chip length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7496
/* Number of ethtool statistics exported (entries in bnx2_stats_str_arr). */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset of a counter within struct statistics_block, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Hardware-counter offset for each statistic; the order must match
 * bnx2_stats_str_arr above.  For 64-bit counters the offset points at
 * the _hi half of the {hi, lo} pair.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7550
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-statistic counter width in bytes for the 5706: 8 = 64-bit
 * counter, 4 = 32-bit, 0 = skipped on this chip (see note above).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7561
/* Per-statistic counter width in bytes for the 5708: 8 = 64-bit
 * counter, 4 = 32-bit, 0 = skipped (only stat_IfHCInBadOctets here).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7569
/* Number of ethtool self-tests (entries in bnx2_tests_str_arr). */
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; the order must match the result slots
 * filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7582
7583static int
7584bnx2_get_sset_count(struct net_device *dev, int sset)
7585{
7586        switch (sset) {
7587        case ETH_SS_TEST:
7588                return BNX2_NUM_TESTS;
7589        case ETH_SS_STATS:
7590                return BNX2_NUM_STATS;
7591        default:
7592                return -EOPNOTSUPP;
7593        }
7594}
7595
7596static void
7597bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7598{
7599        struct bnx2 *bp = netdev_priv(dev);
7600
7601        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7602        if (etest->flags & ETH_TEST_FL_OFFLINE) {
7603                int i;
7604
7605                bnx2_netif_stop(bp, true);
7606                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7607                bnx2_free_skbs(bp);
7608
7609                if (bnx2_test_registers(bp) != 0) {
7610                        buf[0] = 1;
7611                        etest->flags |= ETH_TEST_FL_FAILED;
7612                }
7613                if (bnx2_test_memory(bp) != 0) {
7614                        buf[1] = 1;
7615                        etest->flags |= ETH_TEST_FL_FAILED;
7616                }
7617                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7618                        etest->flags |= ETH_TEST_FL_FAILED;
7619
7620                if (!netif_running(bp->dev))
7621                        bnx2_shutdown_chip(bp);
7622                else {
7623                        bnx2_init_nic(bp, 1);
7624                        bnx2_netif_start(bp, true);
7625                }
7626
7627                /* wait for link up */
7628                for (i = 0; i < 7; i++) {
7629                        if (bp->link_up)
7630                                break;
7631                        msleep_interruptible(1000);
7632                }
7633        }
7634
7635        if (bnx2_test_nvram(bp) != 0) {
7636                buf[3] = 1;
7637                etest->flags |= ETH_TEST_FL_FAILED;
7638        }
7639        if (bnx2_test_intr(bp) != 0) {
7640                buf[4] = 1;
7641                etest->flags |= ETH_TEST_FL_FAILED;
7642        }
7643
7644        if (bnx2_test_link(bp) != 0) {
7645                buf[5] = 1;
7646                etest->flags |= ETH_TEST_FL_FAILED;
7647
7648        }
7649}
7650
7651static void
7652bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7653{
7654        switch (stringset) {
7655        case ETH_SS_STATS:
7656                memcpy(buf, bnx2_stats_str_arr,
7657                        sizeof(bnx2_stats_str_arr));
7658                break;
7659        case ETH_SS_TEST:
7660                memcpy(buf, bnx2_tests_str_arr,
7661                        sizeof(bnx2_tests_str_arr));
7662                break;
7663        }
7664}
7665
7666static void
7667bnx2_get_ethtool_stats(struct net_device *dev,
7668                struct ethtool_stats *stats, u64 *buf)
7669{
7670        struct bnx2 *bp = netdev_priv(dev);
7671        int i;
7672        u32 *hw_stats = (u32 *) bp->stats_blk;
7673        u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7674        u8 *stats_len_arr = NULL;
7675
7676        if (!hw_stats) {
7677                memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7678                return;
7679        }
7680
7681        if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7682            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7683            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7684            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7685                stats_len_arr = bnx2_5706_stats_len_arr;
7686        else
7687                stats_len_arr = bnx2_5708_stats_len_arr;
7688
7689        for (i = 0; i < BNX2_NUM_STATS; i++) {
7690                unsigned long offset;
7691
7692                if (stats_len_arr[i] == 0) {
7693                        /* skip this counter */
7694                        buf[i] = 0;
7695                        continue;
7696                }
7697
7698                offset = bnx2_stats_offset_arr[i];
7699                if (stats_len_arr[i] == 4) {
7700                        /* 4-byte counter */
7701                        buf[i] = (u64) *(hw_stats + offset) +
7702                                 *(temp_stats + offset);
7703                        continue;
7704                }
7705                /* 8-byte counter */
7706                buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7707                         *(hw_stats + offset + 1) +
7708                         (((u64) *(temp_stats + offset)) << 32) +
7709                         *(temp_stats + offset + 1);
7710        }
7711}
7712
/* ethtool "identify" (LED blink) handler.  ACTIVE saves the current LED
 * configuration and hands LED control to the MAC; ON/OFF force the LEDs
 * through the EMAC override bits; INACTIVE drops the override and
 * restores the saved configuration.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save LED mode so ETHTOOL_ID_INACTIVE can restore it. */
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override every speed/traffic LED and drive them on. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		/* Override with all LED bits clear: LEDs off. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Clear the override, then restore the saved LED mode. */
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}
7745
/* ndo_set_features handler.  Keeps vlan_features' TSO bits in sync with
 * VLAN TX offload, and reprograms the chip's VLAN tag stripping when the
 * requested RX setting differs from the current rx_mode.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* Stripping change requires a full reprogram while traffic is
	 * stopped: commit the new features first so bnx2_set_rx_mode()
	 * sees them, notify the firmware, then restart.  Returning 1
	 * signals that dev->features was already updated here —
	 * NOTE(review): verify against netdev_update_features() contract.
	 */
	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}
7770
7771static void bnx2_get_channels(struct net_device *dev,
7772                              struct ethtool_channels *channels)
7773{
7774        struct bnx2 *bp = netdev_priv(dev);
7775        u32 max_rx_rings = 1;
7776        u32 max_tx_rings = 1;
7777
7778        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7779                max_rx_rings = RX_MAX_RINGS;
7780                max_tx_rings = TX_MAX_RINGS;
7781        }
7782
7783        channels->max_rx = max_rx_rings;
7784        channels->max_tx = max_tx_rings;
7785        channels->max_other = 0;
7786        channels->max_combined = 0;
7787        channels->rx_count = bp->num_rx_rings;
7788        channels->tx_count = bp->num_tx_rings;
7789        channels->other_count = 0;
7790        channels->combined_count = 0;
7791}
7792
7793static int bnx2_set_channels(struct net_device *dev,
7794                              struct ethtool_channels *channels)
7795{
7796        struct bnx2 *bp = netdev_priv(dev);
7797        u32 max_rx_rings = 1;
7798        u32 max_tx_rings = 1;
7799        int rc = 0;
7800
7801        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7802                max_rx_rings = RX_MAX_RINGS;
7803                max_tx_rings = TX_MAX_RINGS;
7804        }
7805        if (channels->rx_count > max_rx_rings ||
7806            channels->tx_count > max_tx_rings)
7807                return -EINVAL;
7808
7809        bp->num_req_rx_rings = channels->rx_count;
7810        bp->num_req_tx_rings = channels->tx_count;
7811
7812        if (netif_running(dev))
7813                rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7814                                           bp->tx_ring_size, true);
7815
7816        return rc;
7817}
7818
/* ethtool entry points for the bnx2 driver.  The coalesce mask declares
 * which coalescing parameters bnx2_set_coalesce actually honors.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
	.get_link_ksettings	= bnx2_get_link_ksettings,
	.set_link_ksettings	= bnx2_set_link_ksettings,
};
7851
7852/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  PHY
 * register access goes through bnx2_read_phy/bnx2_write_phy under
 * phy_lock and is refused when the PHY is remotely managed or the
 * device is down.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* GMIIPHY also returns the register value: fall into
		 * the GMIIREG path.
		 */
		fallthrough;
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* A remotely-managed PHY cannot be accessed directly. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7902
7903/* Called with rtnl_lock */
7904static int
7905bnx2_change_mac_addr(struct net_device *dev, void *p)
7906{
7907        struct sockaddr *addr = p;
7908        struct bnx2 *bp = netdev_priv(dev);
7909
7910        if (!is_valid_ether_addr(addr->sa_data))
7911                return -EADDRNOTAVAIL;
7912
7913        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7914        if (netif_running(dev))
7915                bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7916
7917        return 0;
7918}
7919
7920/* Called with rtnl_lock */
7921static int
7922bnx2_change_mtu(struct net_device *dev, int new_mtu)
7923{
7924        struct bnx2 *bp = netdev_priv(dev);
7925
7926        dev->mtu = new_mtu;
7927        return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7928                                     false);
7929}
7930
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke every vector's interrupt handler directly, with
 * its IRQ line disabled around the call so the handler does not race
 * with a real interrupt (used by netconsole/kgdboe).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7947
7948static void
7949bnx2_get_5709_media(struct bnx2 *bp)
7950{
7951        u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7952        u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7953        u32 strap;
7954
7955        if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7956                return;
7957        else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7958                bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7959                return;
7960        }
7961
7962        if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7963                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7964        else
7965                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7966
7967        if (bp->func == 0) {
7968                switch (strap) {
7969                case 0x4:
7970                case 0x5:
7971                case 0x6:
7972                        bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7973                        return;
7974                }
7975        } else {
7976                switch (strap) {
7977                case 0x1:
7978                case 0x2:
7979                case 0x4:
7980                        bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7981                        return;
7982                }
7983        }
7984}
7985
/* Detect the PCI/PCI-X bus type, width, and clock speed from the chip's
 * PCICFG status registers and record them in bp->flags and
 * bp->bus_speed_mhz.
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* In PCI-X mode the speed comes from the detected clock
		 * bits; some detected clocks are reported as the nearest
		 * nominal bus speed (95 -> 100, 80 -> 66, 55/48 -> 50).
		 */
		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
8037
/* Read the VPD area from NVRAM and, if the board's VPD manufacturer id
 * is "1028" (presumably Dell — confirm), append the vendor-specific
 * version keyword to bp->fw_version.  Failures are silent: the generic
 * firmware version string is used instead.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	unsigned int len;
	int rc, i, j;
	u8 *data;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(BNX2_VPD_LEN, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM data is stored byte-swapped: swap each 32-bit word back
	 * before parsing it as VPD.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4)
		swab32s((u32 *)&data[i]);

	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (j < 0)
		goto vpd_done;

	/* Only boards with manufacturer id "1028" carry the keyword. */
	if (len != 4 || memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
					 PCI_VPD_RO_KEYWORD_VENDOR0,
					 &len);
	if (j < 0)
		goto vpd_done;

	/* Cap the copy so the version plus trailing space fits in
	 * bp->fw_version.
	 */
	if (len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8083
8084static int
8085bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8086{
8087        struct bnx2 *bp;
8088        int rc, i, j;
8089        u32 reg;
8090        u64 dma_mask, persist_dma_mask;
8091        int err;
8092
8093        SET_NETDEV_DEV(dev, &pdev->dev);
8094        bp = netdev_priv(dev);
8095
8096        bp->flags = 0;
8097        bp->phy_flags = 0;
8098
8099        bp->temp_stats_blk =
8100                kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8101
8102        if (!bp->temp_stats_blk) {
8103                rc = -ENOMEM;
8104                goto err_out;
8105        }
8106
8107        /* enable device (incl. PCI PM wakeup), and bus-mastering */
8108        rc = pci_enable_device(pdev);
8109        if (rc) {
8110                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8111                goto err_out;
8112        }
8113
8114        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8115                dev_err(&pdev->dev,
8116                        "Cannot find PCI device base address, aborting\n");
8117                rc = -ENODEV;
8118                goto err_out_disable;
8119        }
8120
8121        rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8122        if (rc) {
8123                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8124                goto err_out_disable;
8125        }
8126
8127        pci_set_master(pdev);
8128
8129        bp->pm_cap = pdev->pm_cap;
8130        if (bp->pm_cap == 0) {
8131                dev_err(&pdev->dev,
8132                        "Cannot find power management capability, aborting\n");
8133                rc = -EIO;
8134                goto err_out_release;
8135        }
8136
8137        bp->dev = dev;
8138        bp->pdev = pdev;
8139
8140        spin_lock_init(&bp->phy_lock);
8141        spin_lock_init(&bp->indirect_lock);
8142#ifdef BCM_CNIC
8143        mutex_init(&bp->cnic_lock);
8144#endif
8145        INIT_WORK(&bp->reset_task, bnx2_reset_task);
8146
8147        bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8148                                                         TX_MAX_TSS_RINGS + 1));
8149        if (!bp->regview) {
8150                dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8151                rc = -ENOMEM;
8152                goto err_out_release;
8153        }
8154
8155        /* Configure byte swap and enable write to the reg_window registers.
8156         * Rely on CPU to do target byte swapping on big endian systems
8157         * The chip's target access swapping will not swap all accesses
8158         */
8159        BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8160                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8161                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8162
8163        bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8164
8165        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8166                if (!pci_is_pcie(pdev)) {
8167                        dev_err(&pdev->dev, "Not PCIE, aborting\n");
8168                        rc = -EIO;
8169                        goto err_out_unmap;
8170                }
8171                bp->flags |= BNX2_FLAG_PCIE;
8172                if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8173                        bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8174
8175                /* AER (Advanced Error Reporting) hooks */
8176                err = pci_enable_pcie_error_reporting(pdev);
8177                if (!err)
8178                        bp->flags |= BNX2_FLAG_AER_ENABLED;
8179
8180        } else {
8181                bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8182                if (bp->pcix_cap == 0) {
8183                        dev_err(&pdev->dev,
8184                                "Cannot find PCIX capability, aborting\n");
8185                        rc = -EIO;
8186                        goto err_out_unmap;
8187                }
8188                bp->flags |= BNX2_FLAG_BROKEN_STATS;
8189        }
8190
8191        if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8192            BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8193                if (pdev->msix_cap)
8194                        bp->flags |= BNX2_FLAG_MSIX_CAP;
8195        }
8196
8197        if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8198            BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8199                if (pdev->msi_cap)
8200                        bp->flags |= BNX2_FLAG_MSI_CAP;
8201        }
8202
8203        /* 5708 cannot support DMA addresses > 40-bit.  */
8204        if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8205                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8206        else
8207                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8208
8209        /* Configure DMA attributes. */
8210        if (dma_set_mask(&pdev->dev, dma_mask) == 0) {
8211                dev->features |= NETIF_F_HIGHDMA;
8212                rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
8213                if (rc) {
8214                        dev_err(&pdev->dev,
8215                                "pci_set_consistent_dma_mask failed, aborting\n");
8216                        goto err_out_unmap;
8217                }
8218        } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
8219                dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8220                goto err_out_unmap;
8221        }
8222
8223        if (!(bp->flags & BNX2_FLAG_PCIE))
8224                bnx2_get_pci_speed(bp);
8225
8226        /* 5706A0 may falsely detect SERR and PERR. */
8227        if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8228                reg = BNX2_RD(bp, PCI_COMMAND);
8229                reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8230                BNX2_WR(bp, PCI_COMMAND, reg);
8231        } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8232                !(bp->flags & BNX2_FLAG_PCIX)) {
8233                dev_err(&pdev->dev,
8234                        "5706 A1 can only be used in a PCIX bus, aborting\n");
8235                rc = -EPERM;
8236                goto err_out_unmap;
8237        }
8238
8239        bnx2_init_nvram(bp);
8240
8241        reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8242
8243        if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8244                bp->func = 1;
8245
8246        if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8247            BNX2_SHM_HDR_SIGNATURE_SIG) {
8248                u32 off = bp->func << 2;
8249
8250                bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8251        } else
8252                bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8253
8254        /* Get the permanent MAC address.  First we need to make sure the
8255         * firmware is actually running.
8256         */
8257        reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8258
8259        if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8260            BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8261                dev_err(&pdev->dev, "Firmware not running, aborting\n");
8262                rc = -ENODEV;
8263                goto err_out_unmap;
8264        }
8265
8266        bnx2_read_vpd_fw_ver(bp);
8267
8268        j = strlen(bp->fw_version);
8269        reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8270        for (i = 0; i < 3 && j < 24; i++) {
8271                u8 num, k, skip0;
8272
8273                if (i == 0) {
8274                        bp->fw_version[j++] = 'b';
8275                        bp->fw_version[j++] = 'c';
8276                        bp->fw_version[j++] = ' ';
8277                }
8278                num = (u8) (reg >> (24 - (i * 8)));
8279                for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8280                        if (num >= k || !skip0 || k == 1) {
8281                                bp->fw_version[j++] = (num / k) + '0';
8282                                skip0 = 0;
8283                        }
8284                }
8285                if (i != 2)
8286                        bp->fw_version[j++] = '.';
8287        }
8288        reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8289        if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8290                bp->wol = 1;
8291
8292        if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8293                bp->flags |= BNX2_FLAG_ASF_ENABLE;
8294
8295                for (i = 0; i < 30; i++) {
8296                        reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8297                        if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8298                                break;
8299                        msleep(10);
8300                }
8301        }
8302        reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8303        reg &= BNX2_CONDITION_MFW_RUN_MASK;
8304        if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8305            reg != BNX2_CONDITION_MFW_RUN_NONE) {
8306                u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8307
8308                if (j < 32)
8309                        bp->fw_version[j++] = ' ';
8310                for (i = 0; i < 3 && j < 28; i++) {
8311                        reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8312                        reg = be32_to_cpu(reg);
8313                        memcpy(&bp->fw_version[j], &reg, 4);
8314                        j += 4;
8315                }
8316        }
8317
8318        reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8319        bp->mac_addr[0] = (u8) (reg >> 8);
8320        bp->mac_addr[1] = (u8) reg;
8321
8322        reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8323        bp->mac_addr[2] = (u8) (reg >> 24);
8324        bp->mac_addr[3] = (u8) (reg >> 16);
8325        bp->mac_addr[4] = (u8) (reg >> 8);
8326        bp->mac_addr[5] = (u8) reg;
8327
8328        bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8329        bnx2_set_rx_ring_size(bp, 255);
8330
8331        bp->tx_quick_cons_trip_int = 2;
8332        bp->tx_quick_cons_trip = 20;
8333        bp->tx_ticks_int = 18;
8334        bp->tx_ticks = 80;
8335
8336        bp->rx_quick_cons_trip_int = 2;
8337        bp->rx_quick_cons_trip = 12;
8338        bp->rx_ticks_int = 18;
8339        bp->rx_ticks = 18;
8340
8341        bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8342
8343        bp->current_interval = BNX2_TIMER_INTERVAL;
8344
8345        bp->phy_addr = 1;
8346
8347        /* allocate stats_blk */
8348        rc = bnx2_alloc_stats_blk(dev);
8349        if (rc)
8350                goto err_out_unmap;
8351
8352        /* Disable WOL support if we are running on a SERDES chip. */
8353        if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8354                bnx2_get_5709_media(bp);
8355        else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8356                bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8357
8358        bp->phy_port = PORT_TP;
8359        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8360                bp->phy_port = PORT_FIBRE;
8361                reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8362                if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8363                        bp->flags |= BNX2_FLAG_NO_WOL;
8364                        bp->wol = 0;
8365                }
8366                if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8367                        /* Don't do parallel detect on this board because of
8368                         * some board problems.  The link will not go down
8369                         * if we do parallel detect.
8370                         */
8371                        if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8372                            pdev->subsystem_device == 0x310c)
8373                                bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8374                } else {
8375                        bp->phy_addr = 2;
8376                        if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8377                                bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8378                }
8379        } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8380                   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8381                bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8382        else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8383                 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8384                  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8385                bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8386
8387        bnx2_init_fw_cap(bp);
8388
8389        if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8390            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8391            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8392            !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8393                bp->flags |= BNX2_FLAG_NO_WOL;
8394                bp->wol = 0;
8395        }
8396
8397        if (bp->flags & BNX2_FLAG_NO_WOL)
8398                device_set_wakeup_capable(&bp->pdev->dev, false);
8399        else
8400                device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8401
8402        if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8403                bp->tx_quick_cons_trip_int =
8404                        bp->tx_quick_cons_trip;
8405                bp->tx_ticks_int = bp->tx_ticks;
8406                bp->rx_quick_cons_trip_int =
8407                        bp->rx_quick_cons_trip;
8408                bp->rx_ticks_int = bp->rx_ticks;
8409                bp->comp_prod_trip_int = bp->comp_prod_trip;
8410                bp->com_ticks_int = bp->com_ticks;
8411                bp->cmd_ticks_int = bp->cmd_ticks;
8412        }
8413
8414        /* Disable MSI on 5706 if AMD 8132 bridge is found.
8415         *
8416         * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8417         * with byte enables disabled on the unused 32-bit word.  This is legal
8418         * but causes problems on the AMD 8132 which will eventually stop
8419         * responding after a while.
8420         *
8421         * AMD believes this incompatibility is unique to the 5706, and
8422         * prefers to locally disable MSI rather than globally disabling it.
8423         */
8424        if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8425                struct pci_dev *amd_8132 = NULL;
8426
8427                while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8428                                                  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8429                                                  amd_8132))) {
8430
8431                        if (amd_8132->revision >= 0x10 &&
8432                            amd_8132->revision <= 0x13) {
8433                                disable_msi = 1;
8434                                pci_dev_put(amd_8132);
8435                                break;
8436                        }
8437                }
8438        }
8439
8440        bnx2_set_default_link(bp);
8441        bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8442
8443        timer_setup(&bp->timer, bnx2_timer, 0);
8444        bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8445
8446#ifdef BCM_CNIC
8447        if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8448                bp->cnic_eth_dev.max_iscsi_conn =
8449                        (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8450                         BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8451        bp->cnic_probe = bnx2_cnic_probe;
8452#endif
8453        pci_save_state(pdev);
8454
8455        return 0;
8456
8457err_out_unmap:
8458        if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8459                pci_disable_pcie_error_reporting(pdev);
8460                bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8461        }
8462
8463        pci_iounmap(pdev, bp->regview);
8464        bp->regview = NULL;
8465
8466err_out_release:
8467        pci_release_regions(pdev);
8468
8469err_out_disable:
8470        pci_disable_device(pdev);
8471
8472err_out:
8473        kfree(bp->temp_stats_blk);
8474
8475        return rc;
8476}
8477
8478static char *
8479bnx2_bus_string(struct bnx2 *bp, char *str)
8480{
8481        char *s = str;
8482
8483        if (bp->flags & BNX2_FLAG_PCIE) {
8484                s += sprintf(s, "PCI Express");
8485        } else {
8486                s += sprintf(s, "PCI");
8487                if (bp->flags & BNX2_FLAG_PCIX)
8488                        s += sprintf(s, "-X");
8489                if (bp->flags & BNX2_FLAG_PCI_32BIT)
8490                        s += sprintf(s, " 32-bit");
8491                else
8492                        s += sprintf(s, " 64-bit");
8493                s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8494        }
8495        return str;
8496}
8497
8498static void
8499bnx2_del_napi(struct bnx2 *bp)
8500{
8501        int i;
8502
8503        for (i = 0; i < bp->irq_nvecs; i++)
8504                netif_napi_del(&bp->bnx2_napi[i].napi);
8505}
8506
8507static void
8508bnx2_init_napi(struct bnx2 *bp)
8509{
8510        int i;
8511
8512        for (i = 0; i < bp->irq_nvecs; i++) {
8513                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8514                int (*poll)(struct napi_struct *, int);
8515
8516                if (i == 0)
8517                        poll = bnx2_poll;
8518                else
8519                        poll = bnx2_poll_msix;
8520
8521                netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8522                bnapi->bp = bp;
8523        }
8524}
8525
/* net_device callbacks wired into the networking stack; all handlers are
 * defined earlier in this file.
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_eth_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8542
/* PCI probe callback: allocate the netdev, initialize the board, set up
 * netdev ops and feature flags, and register with the networking core.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired here (and by bnx2_init_board()) are released.
 */
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/*
	 * In-flight DMA from 1st kernel could continue going in kdump kernel.
	 * New io-page table has been created before bnx2 does reset at open stage.
	 * We have to wait for the in-flight DMA to complete to avoid it look up
	 * into the newly created io-page table.
	 */
	if (is_kdump_kernel())
		bnx2_wait_dma_complete(bp);

	/* MAC address was read from NVRAM/shmem by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;

	/* Firmware (e.g. for management traffic) may need VLAN tags left
	 * in place, in which case HW VLAN stripping must stay off.
	 */
	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Unwind what bnx2_init_board() acquired. */
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_free:
	bnx2_free_stats_blk(dev);
	free_netdev(dev);
	return rc;
}
8619
/* PCI remove callback: unregister the netdev, stop deferred work, and
 * release all resources acquired at probe time (reverse order of probe).
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Stop the stack from calling into us before tearing down. */
	unregister_netdev(dev);

	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
8648
8649#ifdef CONFIG_PM_SLEEP
/* System-sleep suspend: quiesce the interface if it is running, shut the
 * chip down, and program Wake-on-LAN state.  Always returns 0.
 */
static int
bnx2_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		/* Stop deferred resets first so they cannot race with
		 * the teardown sequence below.
		 */
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	bnx2_setup_wol(bp);
	return 0;
}
8668
/* System-sleep resume: restore power state and, if the interface was
 * running at suspend time, re-acquire IRQs and reinitialize the NIC.
 * Always returns 0.
 */
static int
bnx2_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8685
8686static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8687#define BNX2_PM_OPS (&bnx2_pm_ops)
8688
8689#else
8690
8691#define BNX2_PM_OPS NULL
8692
8693#endif /* CONFIG_PM_SLEEP */
8694/**
8695 * bnx2_io_error_detected - called when PCI error is detected
8696 * @pdev: Pointer to PCI device
8697 * @state: The current pci connection state
8698 *
8699 * This function is called after a PCI bus error affecting
8700 * this device has been detected.
8701 */
8702static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8703                                               pci_channel_state_t state)
8704{
8705        struct net_device *dev = pci_get_drvdata(pdev);
8706        struct bnx2 *bp = netdev_priv(dev);
8707
8708        rtnl_lock();
8709        netif_device_detach(dev);
8710
8711        if (state == pci_channel_io_perm_failure) {
8712                rtnl_unlock();
8713                return PCI_ERS_RESULT_DISCONNECT;
8714        }
8715
8716        if (netif_running(dev)) {
8717                bnx2_netif_stop(bp, true);
8718                del_timer_sync(&bp->timer);
8719                bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8720        }
8721
8722        pci_disable_device(pdev);
8723        rtnl_unlock();
8724
8725        /* Request a slot slot reset. */
8726        return PCI_ERS_RESULT_NEED_RESET;
8727}
8728
8729/**
8730 * bnx2_io_slot_reset - called after the pci bus has been reset.
8731 * @pdev: Pointer to PCI device
8732 *
8733 * Restart the card from scratch, as if from a cold-boot.
8734 */
8735static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8736{
8737        struct net_device *dev = pci_get_drvdata(pdev);
8738        struct bnx2 *bp = netdev_priv(dev);
8739        pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8740        int err = 0;
8741
8742        rtnl_lock();
8743        if (pci_enable_device(pdev)) {
8744                dev_err(&pdev->dev,
8745                        "Cannot re-enable PCI device after reset\n");
8746        } else {
8747                pci_set_master(pdev);
8748                pci_restore_state(pdev);
8749                pci_save_state(pdev);
8750
8751                if (netif_running(dev))
8752                        err = bnx2_init_nic(bp, 1);
8753
8754                if (!err)
8755                        result = PCI_ERS_RESULT_RECOVERED;
8756        }
8757
8758        if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8759                bnx2_napi_enable(bp);
8760                dev_close(dev);
8761        }
8762        rtnl_unlock();
8763
8764        if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8765                return result;
8766
8767        return result;
8768}
8769
8770/**
8771 * bnx2_io_resume - called when traffic can start flowing again.
8772 * @pdev: Pointer to PCI device
8773 *
8774 * This callback is called when the error recovery driver tells us that
8775 * its OK to resume normal operation.
8776 */
8777static void bnx2_io_resume(struct pci_dev *pdev)
8778{
8779        struct net_device *dev = pci_get_drvdata(pdev);
8780        struct bnx2 *bp = netdev_priv(dev);
8781
8782        rtnl_lock();
8783        if (netif_running(dev))
8784                bnx2_netif_start(bp, true);
8785
8786        netif_device_attach(dev);
8787        rtnl_unlock();
8788}
8789
8790static void bnx2_shutdown(struct pci_dev *pdev)
8791{
8792        struct net_device *dev = pci_get_drvdata(pdev);
8793        struct bnx2 *bp;
8794
8795        if (!dev)
8796                return;
8797
8798        bp = netdev_priv(dev);
8799        if (!bp)
8800                return;
8801
8802        rtnl_lock();
8803        if (netif_running(dev))
8804                dev_close(bp->dev);
8805
8806        if (system_state == SYSTEM_POWER_OFF)
8807                bnx2_set_power_state(bp, PCI_D3hot);
8808
8809        rtnl_unlock();
8810}
8811
/* PCI AER recovery callbacks: detach on error, reinit after slot reset,
 * restart traffic on resume.
 */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8817
/* PCI driver descriptor registered by module_pci_driver() below. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};
8827
8828module_pci_driver(bnx2_pci_driver);
8829