/* linux/drivers/net/bnx2.c */
   1/* bnx2.c: Broadcom NX2 network driver.
   2 *
   3 * Copyright (c) 2004-2010 Broadcom Corporation
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation.
   8 *
   9 * Written by: Michael Chan  (mchan@broadcom.com)
  10 */
  11
  12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16
  17#include <linux/kernel.h>
  18#include <linux/timer.h>
  19#include <linux/errno.h>
  20#include <linux/ioport.h>
  21#include <linux/slab.h>
  22#include <linux/vmalloc.h>
  23#include <linux/interrupt.h>
  24#include <linux/pci.h>
  25#include <linux/init.h>
  26#include <linux/netdevice.h>
  27#include <linux/etherdevice.h>
  28#include <linux/skbuff.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/bitops.h>
  31#include <asm/io.h>
  32#include <asm/irq.h>
  33#include <linux/delay.h>
  34#include <asm/byteorder.h>
  35#include <asm/page.h>
  36#include <linux/time.h>
  37#include <linux/ethtool.h>
  38#include <linux/mii.h>
  39#include <linux/if_vlan.h>
  40#include <net/ip.h>
  41#include <net/tcp.h>
  42#include <net/checksum.h>
  43#include <linux/workqueue.h>
  44#include <linux/crc32.h>
  45#include <linux/prefetch.h>
  46#include <linux/cache.h>
  47#include <linux/firmware.h>
  48#include <linux/log2.h>
  49#include <linux/aer.h>
  50
  51#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
  52#define BCM_CNIC 1
  53#include "cnic_if.h"
  54#endif
  55#include "bnx2.h"
  56#include "bnx2_fw.h"
  57
/* Driver identification strings. */
#define DRV_MODULE_NAME         "bnx2"
#define DRV_MODULE_VERSION      "2.0.21"
#define DRV_MODULE_RELDATE      "Dec 23, 2010"
/* External firmware image names, declared to the module loader via the
 * MODULE_FIRMWARE() statements below.  The "06" vs "09" suffix presumably
 * selects the chip generation each image targets -- confirm against the
 * firmware-request code later in this file.
 */
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.1.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1.fw"
#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

/* Banner string printed at load time; __devinitdata so it can be
 * discarded after initialization.
 */
static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
  89
/* Supported board types.  The values are used as the driver_data field
 * of bnx2_pci_tbl[] below and index board_info[] for the display name.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;
 103
/* Human-readable adapter names, indexed by board_t, above. */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
        };
 120
/* PCI IDs handled by this driver.  The HP NC370x entries match specific
 * subsystem IDs and must come before the wildcard (PCI_ANY_ID) entries
 * for the same device ID; the last field (driver_data) is a board_t.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        /* 5716/5716S have no PCI_DEVICE_ID_NX2_* symbol; raw device IDs. */
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
 146
/* NVRAM interface table.  Each entry describes one supported flash or
 * EEPROM part: five chip configuration words (see struct flash_spec for
 * their field names), then the access flags, page geometry, byte address
 * mask, total size and a human-readable name.  "Expansion" entries are
 * placeholders for strapping combinations with no known part.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
 235
/* The 5709 has a single fixed NVRAM configuration, used instead of the
 * strap-selected flash_table[] entries above.
 */
static const struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 246
 247static void bnx2_init_napi(struct bnx2 *bp);
 248static void bnx2_del_napi(struct bnx2 *bp);
 249
 250static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 251{
 252        u32 diff;
 253
 254        /* Tell compiler to fetch tx_prod and tx_cons from memory. */
 255        barrier();
 256
 257        /* The ring uses 256 indices for 255 entries, one of them
 258         * needs to be skipped.
 259         */
 260        diff = txr->tx_prod - txr->tx_cons;
 261        if (unlikely(diff >= TX_DESC_CNT)) {
 262                diff &= 0xffff;
 263                if (diff == TX_DESC_CNT)
 264                        diff = MAX_TX_DESC_CNT;
 265        }
 266        return bp->tx_ring_size - diff;
 267}
 268
 269static u32
 270bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
 271{
 272        u32 val;
 273
 274        spin_lock_bh(&bp->indirect_lock);
 275        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
 276        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
 277        spin_unlock_bh(&bp->indirect_lock);
 278        return val;
 279}
 280
/* Write @val to a chip register through the indirect PCI config window;
 * serialized with indirect_lock like bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
 289
/* Write a word into the firmware shared-memory region (offset relative
 * to bp->shmem_base).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
 295
/* Read a word from the firmware shared-memory region (offset relative
 * to bp->shmem_base).
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
        return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
 301
/* Write a 32-bit value into on-chip context memory at cid_addr + offset.
 * On the 5709 the write goes through the CTX_CTX_DATA/CTRL registers and
 * completion is polled (up to 5 tries, 5us apart); earlier chips use a
 * simple address/data register pair.  Serialized with indirect_lock.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* Wait for the hardware to clear the WRITE_REQ bit. */
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
 325
 326#ifdef BCM_CNIC
 327static int
 328bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
 329{
 330        struct bnx2 *bp = netdev_priv(dev);
 331        struct drv_ctl_io *io = &info->data.io;
 332
 333        switch (info->cmd) {
 334        case DRV_CTL_IO_WR_CMD:
 335                bnx2_reg_wr_ind(bp, io->offset, io->data);
 336                break;
 337        case DRV_CTL_IO_RD_CMD:
 338                io->data = bnx2_reg_rd_ind(bp, io->offset);
 339                break;
 340        case DRV_CTL_CTX_WR_CMD:
 341                bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
 342                break;
 343        default:
 344                return -EINVAL;
 345        }
 346        return 0;
 347}
 348
 349static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
 350{
 351        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 352        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 353        int sb_id;
 354
 355        if (bp->flags & BNX2_FLAG_USING_MSIX) {
 356                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
 357                bnapi->cnic_present = 0;
 358                sb_id = bp->irq_nvecs;
 359                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
 360        } else {
 361                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
 362                bnapi->cnic_tag = bnapi->last_status_idx;
 363                bnapi->cnic_present = 1;
 364                sb_id = 0;
 365                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
 366        }
 367
 368        cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
 369        cp->irq_arr[0].status_blk = (void *)
 370                ((unsigned long) bnapi->status_blk.msi +
 371                (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
 372        cp->irq_arr[0].status_blk_num = sb_id;
 373        cp->num_irq = 1;
 374}
 375
 376static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 377                              void *data)
 378{
 379        struct bnx2 *bp = netdev_priv(dev);
 380        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 381
 382        if (ops == NULL)
 383                return -EINVAL;
 384
 385        if (cp->drv_state & CNIC_DRV_STATE_REGD)
 386                return -EBUSY;
 387
 388        bp->cnic_data = data;
 389        rcu_assign_pointer(bp->cnic_ops, ops);
 390
 391        cp->num_irq = 0;
 392        cp->drv_state = CNIC_DRV_STATE_REGD;
 393
 394        bnx2_setup_cnic_irq_info(bp);
 395
 396        return 0;
 397}
 398
/* Detach the CNIC driver: clear its state under cnic_lock, then wait for
 * all RCU readers of bp->cnic_ops to drain before returning.  Always
 * returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
}
 413
 414struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
 415{
 416        struct bnx2 *bp = netdev_priv(dev);
 417        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 418
 419        cp->drv_owner = THIS_MODULE;
 420        cp->chip_id = bp->chip_id;
 421        cp->pdev = bp->pdev;
 422        cp->io_base = bp->regview;
 423        cp->drv_ctl = bnx2_drv_ctl;
 424        cp->drv_register_cnic = bnx2_register_cnic;
 425        cp->drv_unregister_cnic = bnx2_unregister_cnic;
 426
 427        return cp;
 428}
 429EXPORT_SYMBOL(bnx2_cnic_probe);
 430
 431static void
 432bnx2_cnic_stop(struct bnx2 *bp)
 433{
 434        struct cnic_ops *c_ops;
 435        struct cnic_ctl_info info;
 436
 437        mutex_lock(&bp->cnic_lock);
 438        c_ops = bp->cnic_ops;
 439        if (c_ops) {
 440                info.cmd = CNIC_CTL_STOP_CMD;
 441                c_ops->cnic_ctl(bp->cnic_data, &info);
 442        }
 443        mutex_unlock(&bp->cnic_lock);
 444}
 445
 446static void
 447bnx2_cnic_start(struct bnx2 *bp)
 448{
 449        struct cnic_ops *c_ops;
 450        struct cnic_ctl_info info;
 451
 452        mutex_lock(&bp->cnic_lock);
 453        c_ops = bp->cnic_ops;
 454        if (c_ops) {
 455                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 456                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 457
 458                        bnapi->cnic_tag = bnapi->last_status_idx;
 459                }
 460                info.cmd = CNIC_CTL_START_CMD;
 461                c_ops->cnic_ctl(bp->cnic_data, &info);
 462        }
 463        mutex_unlock(&bp->cnic_lock);
 464}
 465
 466#else
 467
/* CNIC support not compiled in: stop is a no-op. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
 472
/* CNIC support not compiled in: start is a no-op. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
 477
 478#endif
 479
/* Read PHY register @reg over the MDIO interface into *val.
 *
 * If hardware auto-polling of the PHY is enabled it is temporarily
 * turned off around the access and restored afterwards.  Completion of
 * the command is polled for up to 50 * 10us.
 *
 * Returns 0 on success, or -EBUSY (with *val set to 0) if the MDIO
 * interface never went idle.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        /* Suspend auto-polling so it cannot collide with our command. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush the write */

                udelay(40);
        }

        /* Issue the read: PHY address, register number, start/busy bit. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to pick up the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if we turned it off above. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush the write */

                udelay(40);
        }

        return ret;
}
 536
/* Write @val to PHY register @reg over the MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the access,
 * and command completion is polled for up to 50 * 10us.
 *
 * Returns 0 on success or -EBUSY if the MDIO interface never went idle.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        /* Suspend auto-polling so it cannot collide with our command. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush the write */

                udelay(40);
        }

        /* Issue the write: PHY address, register, data, start/busy bit. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore auto-polling if we turned it off above. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush the write */

                udelay(40);
        }

        return ret;
}
 585
 586static void
 587bnx2_disable_int(struct bnx2 *bp)
 588{
 589        int i;
 590        struct bnx2_napi *bnapi;
 591
 592        for (i = 0; i < bp->irq_nvecs; i++) {
 593                bnapi = &bp->bnx2_napi[i];
 594                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 595                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
 596        }
 597        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
 598}
 599
 600static void
 601bnx2_enable_int(struct bnx2 *bp)
 602{
 603        int i;
 604        struct bnx2_napi *bnapi;
 605
 606        for (i = 0; i < bp->irq_nvecs; i++) {
 607                bnapi = &bp->bnx2_napi[i];
 608
 609                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 610                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 611                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
 612                       bnapi->last_status_idx);
 613
 614                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 615                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 616                       bnapi->last_status_idx);
 617        }
 618        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
 619}
 620
/* Disable chip interrupts and wait for in-flight handlers to finish.
 * intr_sem is incremented first so bnx2_netif_start() stays a no-op
 * until the matching decrement; if the interface is down there are no
 * handlers to synchronize with.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        if (!netif_running(bp->dev))
                return;

        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
 634
 635static void
 636bnx2_napi_disable(struct bnx2 *bp)
 637{
 638        int i;
 639
 640        for (i = 0; i < bp->irq_nvecs; i++)
 641                napi_disable(&bp->bnx2_napi[i].napi);
 642}
 643
 644static void
 645bnx2_napi_enable(struct bnx2 *bp)
 646{
 647        int i;
 648
 649        for (i = 0; i < bp->irq_nvecs; i++)
 650                napi_enable(&bp->bnx2_napi[i].napi);
 651}
 652
/* Quiesce the device: optionally stop the CNIC driver, disable NAPI and
 * the TX queues, then mask interrupts and wait for handlers to finish.
 * Carrier is forced off so the stack does not declare a TX timeout while
 * we are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
        if (stop_cnic)
                bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
        }
        bnx2_disable_int_sync(bp);
        netif_carrier_off(bp->dev);     /* prevent tx timeout */
}
 665
/* Undo bnx2_netif_stop().  Only acts when the last outstanding
 * bnx2_disable_int_sync() reference is dropped (intr_sem reaches zero),
 * and only if the interface is up.  Carrier is restored under phy_lock
 * to stay consistent with bp->link_up.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        spin_lock_bh(&bp->phy_lock);
                        if (bp->link_up)
                                netif_carrier_on(bp->dev);
                        spin_unlock_bh(&bp->phy_lock);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                        if (start_cnic)
                                bnx2_cnic_start(bp);
                }
        }
}
 683
 684static void
 685bnx2_free_tx_mem(struct bnx2 *bp)
 686{
 687        int i;
 688
 689        for (i = 0; i < bp->num_tx_rings; i++) {
 690                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 691                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 692
 693                if (txr->tx_desc_ring) {
 694                        dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
 695                                          txr->tx_desc_ring,
 696                                          txr->tx_desc_mapping);
 697                        txr->tx_desc_ring = NULL;
 698                }
 699                kfree(txr->tx_buf_ring);
 700                txr->tx_buf_ring = NULL;
 701        }
 702}
 703
 704static void
 705bnx2_free_rx_mem(struct bnx2 *bp)
 706{
 707        int i;
 708
 709        for (i = 0; i < bp->num_rx_rings; i++) {
 710                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 711                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 712                int j;
 713
 714                for (j = 0; j < bp->rx_max_ring; j++) {
 715                        if (rxr->rx_desc_ring[j])
 716                                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
 717                                                  rxr->rx_desc_ring[j],
 718                                                  rxr->rx_desc_mapping[j]);
 719                        rxr->rx_desc_ring[j] = NULL;
 720                }
 721                vfree(rxr->rx_buf_ring);
 722                rxr->rx_buf_ring = NULL;
 723
 724                for (j = 0; j < bp->rx_max_pg_ring; j++) {
 725                        if (rxr->rx_pg_desc_ring[j])
 726                                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
 727                                                  rxr->rx_pg_desc_ring[j],
 728                                                  rxr->rx_pg_desc_mapping[j]);
 729                        rxr->rx_pg_desc_ring[j] = NULL;
 730                }
 731                vfree(rxr->rx_pg_ring);
 732                rxr->rx_pg_ring = NULL;
 733        }
 734}
 735
 736static int
 737bnx2_alloc_tx_mem(struct bnx2 *bp)
 738{
 739        int i;
 740
 741        for (i = 0; i < bp->num_tx_rings; i++) {
 742                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 743                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 744
 745                txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
 746                if (txr->tx_buf_ring == NULL)
 747                        return -ENOMEM;
 748
 749                txr->tx_desc_ring =
 750                        dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
 751                                           &txr->tx_desc_mapping, GFP_KERNEL);
 752                if (txr->tx_desc_ring == NULL)
 753                        return -ENOMEM;
 754        }
 755        return 0;
 756}
 757
 758static int
 759bnx2_alloc_rx_mem(struct bnx2 *bp)
 760{
 761        int i;
 762
 763        for (i = 0; i < bp->num_rx_rings; i++) {
 764                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 765                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 766                int j;
 767
 768                rxr->rx_buf_ring =
 769                        vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
 770                if (rxr->rx_buf_ring == NULL)
 771                        return -ENOMEM;
 772
 773                for (j = 0; j < bp->rx_max_ring; j++) {
 774                        rxr->rx_desc_ring[j] =
 775                                dma_alloc_coherent(&bp->pdev->dev,
 776                                                   RXBD_RING_SIZE,
 777                                                   &rxr->rx_desc_mapping[j],
 778                                                   GFP_KERNEL);
 779                        if (rxr->rx_desc_ring[j] == NULL)
 780                                return -ENOMEM;
 781
 782                }
 783
 784                if (bp->rx_pg_ring_size) {
 785                        rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
 786                                                  bp->rx_max_pg_ring);
 787                        if (rxr->rx_pg_ring == NULL)
 788                                return -ENOMEM;
 789
 790                }
 791
 792                for (j = 0; j < bp->rx_max_pg_ring; j++) {
 793                        rxr->rx_pg_desc_ring[j] =
 794                                dma_alloc_coherent(&bp->pdev->dev,
 795                                                   RXBD_RING_SIZE,
 796                                                   &rxr->rx_pg_desc_mapping[j],
 797                                                   GFP_KERNEL);
 798                        if (rxr->rx_pg_desc_ring[j] == NULL)
 799                                return -ENOMEM;
 800
 801                }
 802        }
 803        return 0;
 804}
 805
 806static void
 807bnx2_free_mem(struct bnx2 *bp)
 808{
 809        int i;
 810        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 811
 812        bnx2_free_tx_mem(bp);
 813        bnx2_free_rx_mem(bp);
 814
 815        for (i = 0; i < bp->ctx_pages; i++) {
 816                if (bp->ctx_blk[i]) {
 817                        dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
 818                                          bp->ctx_blk[i],
 819                                          bp->ctx_blk_mapping[i]);
 820                        bp->ctx_blk[i] = NULL;
 821                }
 822        }
 823        if (bnapi->status_blk.msi) {
 824                dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
 825                                  bnapi->status_blk.msi,
 826                                  bp->status_blk_mapping);
 827                bnapi->status_blk.msi = NULL;
 828                bp->stats_blk = NULL;
 829        }
 830}
 831
/* Allocate all host DMA memory for the device: one coherent buffer that
 * combines the status block(s) and the statistics block, the 5709 context
 * memory pages, and the rx/tx ring memory.  Also wires each napi vector's
 * hardware consumer-index pointers into the status block.
 *
 * Returns 0 on success or -ENOMEM; on any failure every partial
 * allocation is released through bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* One aligned status block per possible MSI-X hardware
                 * vector, whether or not all vectors end up in use.
                 */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
                                        &bp->status_blk_mapping, GFP_KERNEL);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 always uses the base (MSI/INTx) status block layout. */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* Remaining vectors each get their own aligned slice of
                 * the combined buffer, in MSI-X status block format.
                 */
                for (i = 1; i < bp->irq_nvecs; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        /* Vector number field used when acking this IRQ. */
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics block lives immediately after the status block(s)
         * in the same coherent allocation.
         */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 needs 8KB of host context memory, allocated in
                 * page-size chunks (at least one page).
                 */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i],
                                                GFP_KERNEL);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
 909
 910static void
 911bnx2_report_fw_link(struct bnx2 *bp)
 912{
 913        u32 fw_link_status = 0;
 914
 915        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
 916                return;
 917
 918        if (bp->link_up) {
 919                u32 bmsr;
 920
 921                switch (bp->line_speed) {
 922                case SPEED_10:
 923                        if (bp->duplex == DUPLEX_HALF)
 924                                fw_link_status = BNX2_LINK_STATUS_10HALF;
 925                        else
 926                                fw_link_status = BNX2_LINK_STATUS_10FULL;
 927                        break;
 928                case SPEED_100:
 929                        if (bp->duplex == DUPLEX_HALF)
 930                                fw_link_status = BNX2_LINK_STATUS_100HALF;
 931                        else
 932                                fw_link_status = BNX2_LINK_STATUS_100FULL;
 933                        break;
 934                case SPEED_1000:
 935                        if (bp->duplex == DUPLEX_HALF)
 936                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
 937                        else
 938                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
 939                        break;
 940                case SPEED_2500:
 941                        if (bp->duplex == DUPLEX_HALF)
 942                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
 943                        else
 944                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
 945                        break;
 946                }
 947
 948                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
 949
 950                if (bp->autoneg) {
 951                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
 952
 953                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
 954                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
 955
 956                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
 957                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
 958                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
 959                        else
 960                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
 961                }
 962        }
 963        else
 964                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
 965
 966        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
 967}
 968
 969static char *
 970bnx2_xceiver_str(struct bnx2 *bp)
 971{
 972        return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
 973                ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
 974                 "Copper");
 975}
 976
/* Log the link state to the kernel log, update the carrier state, and
 * forward the new state to the firmware via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                /* No trailing newline: the flow-control details below are
                 * appended to the same line with pr_cont().
                 */
                netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
                            bnx2_xceiver_str(bp),
                            bp->line_speed,
                            bp->duplex == DUPLEX_FULL ? "full" : "half");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                pr_cont(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        pr_cont("& transmit ");
                        }
                        else {
                                pr_cont(", transmit ");
                        }
                        pr_cont("flow control ON");
                }
                pr_cont("\n");
        } else {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC %s Link is Down\n",
                           bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
1007
/* Resolve the effective flow control (bp->flow_ctrl) from either the
 * user-forced setting or the autonegotiated pause advertisements,
 * following the symmetric/asymmetric pause resolution rules of
 * IEEE 802.3 (Table 28B-3).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        /* If either speed or flow-control autoneg is off, use the
         * requested setting directly (full duplex only).
         */
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause frames are only defined for full duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* 5708 SerDes reports the already-resolved pause result in a
         * status register, so no advertisement arithmetic is needed.
         */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        /* Translate 1000BASE-X pause bits into the copper-style
         * PAUSE_CAP/PAUSE_ASYM encoding so one resolution table below
         * handles both media types.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
1083
1084static int
1085bnx2_5709s_linkup(struct bnx2 *bp)
1086{
1087        u32 val, speed;
1088
1089        bp->link_up = 1;
1090
1091        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1092        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1093        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1094
1095        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1096                bp->line_speed = bp->req_line_speed;
1097                bp->duplex = bp->req_duplex;
1098                return 0;
1099        }
1100        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1101        switch (speed) {
1102                case MII_BNX2_GP_TOP_AN_SPEED_10:
1103                        bp->line_speed = SPEED_10;
1104                        break;
1105                case MII_BNX2_GP_TOP_AN_SPEED_100:
1106                        bp->line_speed = SPEED_100;
1107                        break;
1108                case MII_BNX2_GP_TOP_AN_SPEED_1G:
1109                case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1110                        bp->line_speed = SPEED_1000;
1111                        break;
1112                case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1113                        bp->line_speed = SPEED_2500;
1114                        break;
1115        }
1116        if (val & MII_BNX2_GP_TOP_AN_FD)
1117                bp->duplex = DUPLEX_FULL;
1118        else
1119                bp->duplex = DUPLEX_HALF;
1120        return 0;
1121}
1122
1123static int
1124bnx2_5708s_linkup(struct bnx2 *bp)
1125{
1126        u32 val;
1127
1128        bp->link_up = 1;
1129        bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1130        switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1131                case BCM5708S_1000X_STAT1_SPEED_10:
1132                        bp->line_speed = SPEED_10;
1133                        break;
1134                case BCM5708S_1000X_STAT1_SPEED_100:
1135                        bp->line_speed = SPEED_100;
1136                        break;
1137                case BCM5708S_1000X_STAT1_SPEED_1G:
1138                        bp->line_speed = SPEED_1000;
1139                        break;
1140                case BCM5708S_1000X_STAT1_SPEED_2G5:
1141                        bp->line_speed = SPEED_2500;
1142                        break;
1143        }
1144        if (val & BCM5708S_1000X_STAT1_FD)
1145                bp->duplex = DUPLEX_FULL;
1146        else
1147                bp->duplex = DUPLEX_HALF;
1148
1149        return 0;
1150}
1151
1152static int
1153bnx2_5706s_linkup(struct bnx2 *bp)
1154{
1155        u32 bmcr, local_adv, remote_adv, common;
1156
1157        bp->link_up = 1;
1158        bp->line_speed = SPEED_1000;
1159
1160        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1161        if (bmcr & BMCR_FULLDPLX) {
1162                bp->duplex = DUPLEX_FULL;
1163        }
1164        else {
1165                bp->duplex = DUPLEX_HALF;
1166        }
1167
1168        if (!(bmcr & BMCR_ANENABLE)) {
1169                return 0;
1170        }
1171
1172        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1173        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1174
1175        common = local_adv & remote_adv;
1176        if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1177
1178                if (common & ADVERTISE_1000XFULL) {
1179                        bp->duplex = DUPLEX_FULL;
1180                }
1181                else {
1182                        bp->duplex = DUPLEX_HALF;
1183                }
1184        }
1185
1186        return 0;
1187}
1188
/* Record link-up speed/duplex for a copper PHY.  With autoneg enabled,
 * resolve the highest common capability (1000 first, then 100/10);
 * otherwise read the forced speed/duplex from BMCR.  May clear
 * bp->link_up if autoneg completed with no common ability.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* The link partner's 1000BASE-T ability bits in
                 * MII_STAT1000 sit two bit positions above our
                 * advertisement bits in MII_CTRL1000; shift to align
                 * them before AND-ing.
                 */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No gigabit match; fall back to 10/100
                         * resolution via the base-page registers.
                         */
                        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
                        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* Negotiation yielded nothing usable. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Autoneg disabled: decode the forced BMCR settings. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
1254
/* Program the context-type word of one rx ring's L2 context, enabling
 * hardware flow control generation when TX pause is active.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
        u32 val, rx_cid_addr = GET_CID_ADDR(cid);

        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        /* NOTE(review): 0x02 << 8 is an undocumented field in the
         * context-type word (presumably a BD pre-read count) — confirm
         * against the chip documentation before changing.
         */
        val |= 0x02 << 8;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1269
1270static void
1271bnx2_init_all_rx_contexts(struct bnx2 *bp)
1272{
1273        int i;
1274        u32 cid;
1275
1276        for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1277                if (i == 1)
1278                        cid = RX_RSS_CID;
1279                bnx2_init_rx_context(bp, cid);
1280        }
1281}
1282
/* Program the EMAC for the current link state: inter-frame gap, port
 * mode (MII/GMII/2.5G), duplex, and rx/tx pause enables.  Also acks the
 * link-change interrupt and refreshes the rx contexts so their flow
 * control setting tracks bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* Default TX length/IFG setting; 1000HD needs a larger value. */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* 5706 has no dedicated 10M mode and
                                 * uses plain MII instead.
                                 */
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                /* 2.5G is GMII plus the 25G mode bit. */
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                /* Link down: park the port in GMII mode. */
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        bnx2_init_all_rx_contexts(bp);
}
1349
1350static void
1351bnx2_enable_bmsr1(struct bnx2 *bp)
1352{
1353        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1354            (CHIP_NUM(bp) == CHIP_NUM_5709))
1355                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1356                               MII_BNX2_BLK_ADDR_GP_STATUS);
1357}
1358
1359static void
1360bnx2_disable_bmsr1(struct bnx2 *bp)
1361{
1362        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1363            (CHIP_NUM(bp) == CHIP_NUM_5709))
1364                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1365                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1366}
1367
1368static int
1369bnx2_test_and_enable_2g5(struct bnx2 *bp)
1370{
1371        u32 up1;
1372        int ret = 1;
1373
1374        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1375                return 0;
1376
1377        if (bp->autoneg & AUTONEG_SPEED)
1378                bp->advertising |= ADVERTISED_2500baseX_Full;
1379
1380        if (CHIP_NUM(bp) == CHIP_NUM_5709)
1381                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1382
1383        bnx2_read_phy(bp, bp->mii_up1, &up1);
1384        if (!(up1 & BCM5708S_UP1_2G5)) {
1385                up1 |= BCM5708S_UP1_2G5;
1386                bnx2_write_phy(bp, bp->mii_up1, up1);
1387                ret = 0;
1388        }
1389
1390        if (CHIP_NUM(bp) == CHIP_NUM_5709)
1391                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1392                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1393
1394        return ret;
1395}
1396
1397static int
1398bnx2_test_and_disable_2g5(struct bnx2 *bp)
1399{
1400        u32 up1;
1401        int ret = 0;
1402
1403        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1404                return 0;
1405
1406        if (CHIP_NUM(bp) == CHIP_NUM_5709)
1407                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1408
1409        bnx2_read_phy(bp, bp->mii_up1, &up1);
1410        if (up1 & BCM5708S_UP1_2G5) {
1411                up1 &= ~BCM5708S_UP1_2G5;
1412                bnx2_write_phy(bp, bp->mii_up1, up1);
1413                ret = 1;
1414        }
1415
1416        if (CHIP_NUM(bp) == CHIP_NUM_5709)
1417                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1418                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1419
1420        return ret;
1421}
1422
/* Force the PHY to 2.5G operation (chip-specific mechanism), then
 * disable autoneg in BMCR since the speed is now forced.  No-op for
 * PHYs that are not 2.5G capable or chips other than 5708/5709.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
        /* bmcr is only read through bnx2_read_phy(); the annotation
         * silences a false "may be used uninitialized" warning.
         */
        u32 uninitialized_var(bmcr);
        int err;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                /* 5709: force 2.5G via the SERDES_DIG_MISC1 register. */
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
                        val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
                        val |= MII_BNX2_SD_MISC1_FORCE |
                                MII_BNX2_SD_MISC1_FORCE_2_5G;
                        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
                }

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                /* 5708: force 2.5G via a vendor bit in BMCR. */
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (!err)
                        bmcr |= BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        if (err)
                return;

        if (bp->autoneg & AUTONEG_SPEED) {
                bmcr &= ~BMCR_ANENABLE;
                if (bp->req_duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;
        }
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1466
/* Undo bnx2_enable_forced_2g5(): remove the chip-specific 2.5G force
 * and, if speed autoneg is configured, re-enable and restart autoneg.
 * No-op for PHYs that are not 2.5G capable or chips other than
 * 5708/5709.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
        /* bmcr is only read through bnx2_read_phy(); the annotation
         * silences a false "may be used uninitialized" warning.
         */
        u32 uninitialized_var(bmcr);
        int err;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                /* 5709: clear the force bit in SERDES_DIG_MISC1. */
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
                        val &= ~MII_BNX2_SD_MISC1_FORCE;
                        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
                }

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                /* 5708: clear the vendor force bit in BMCR. */
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (!err)
                        bmcr &= ~BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        if (err)
                return;

        if (bp->autoneg & AUTONEG_SPEED)
                bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1505
/* Force the 5706 SerDes link down (start != 0 releases the force, 0
 * applies it) by tweaking the expansion SERDES control register.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
        u32 val;

        /* Access the SERDES control register via the DSP expansion
         * address/data register pair.
         */
        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
        /* NOTE(review): the 0xff0f / 0xc0 masks are undocumented here —
         * presumably bits that disable the transmitter; confirm against
         * the 5706 PHY documentation before changing.
         */
        if (start)
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
        else
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1518
/* Re-evaluate the physical link state and update driver and MAC state
 * accordingly: reads the (latched) BMSR, derives speed/duplex via the
 * chip-specific linkup helper, resolves flow control, reports changes,
 * and reprograms the EMAC.  Called with bp->phy_lock held (the PHY
 * accessors assume it).  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback the link is up by definition. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        /* Remote-PHY mode: the management firmware drives link state. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        link_up = bp->link_up;

        /* Read BMSR twice: the link-status bit is latched low. */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        /* 5706 SerDes: BMSR alone is unreliable; combine the EMAC link
         * status with the AN debug shadow register (also latched, so
         * read twice) to synthesize a trustworthy link bit.
         */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val, an_dbg;

                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                val = REG_RD(bp, BNX2_EMAC_STATUS);

                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

                if ((val & BNX2_EMAC_STATUS_LINK) &&
                    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Chip-specific speed/duplex readout. */
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link down: drop any forced 2.5G mode and restore
                 * autoneg if parallel detect had disabled it.
                 */
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        /* Only log/report on an actual transition. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1602
1603static int
1604bnx2_reset_phy(struct bnx2 *bp)
1605{
1606        int i;
1607        u32 reg;
1608
1609        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1610
1611#define PHY_RESET_MAX_WAIT 100
1612        for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1613                udelay(10);
1614
1615                bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1616                if (!(reg & BMCR_RESET)) {
1617                        udelay(20);
1618                        break;
1619                }
1620        }
1621        if (i == PHY_RESET_MAX_WAIT) {
1622                return -EBUSY;
1623        }
1624        return 0;
1625}
1626
1627static u32
1628bnx2_phy_get_pause_adv(struct bnx2 *bp)
1629{
1630        u32 adv = 0;
1631
1632        if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1633                (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1634
1635                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1636                        adv = ADVERTISE_1000XPAUSE;
1637                }
1638                else {
1639                        adv = ADVERTISE_PAUSE_CAP;
1640                }
1641        }
1642        else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1643                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644                        adv = ADVERTISE_1000XPSE_ASYM;
1645                }
1646                else {
1647                        adv = ADVERTISE_PAUSE_ASYM;
1648                }
1649        }
1650        else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1651                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652                        adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1653                }
1654                else {
1655                        adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1656                }
1657        }
1658        return adv;
1659}
1660
1661static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1662
/* Configure link parameters when the PHY is controlled by the
 * management firmware: encode the requested/advertised speeds, pause
 * settings, and port type into a netlink-style argument word and hand
 * it to the firmware with a SET_LINK command.  The phy_lock is dropped
 * around the firmware handshake.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 speed_arg = 0, pause_adv;

        pause_adv = bnx2_phy_get_pause_adv(bp);

        if (bp->autoneg & AUTONEG_SPEED) {
                /* Autoneg: advertise every enabled speed. */
                speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                if (bp->advertising & ADVERTISED_2500baseX_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
        } else {
                /* Forced speed: encode exactly one speed/duplex. */
                if (bp->req_line_speed == SPEED_2500)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
                else if (bp->req_line_speed == SPEED_1000)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                else if (bp->req_line_speed == SPEED_100) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                } else if (bp->req_line_speed == SPEED_10) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                }
        }

        if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
        if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

        if (port == PORT_TP)
                speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
                             BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

        bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

        /* bnx2_fw_sync() may sleep; don't hold the phy_lock across it. */
        spin_unlock_bh(&bp->phy_lock);
        bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
        spin_lock_bh(&bp->phy_lock);

        return 0;
}
1721
/* Configure the SerDes PHY for either forced speed/duplex or
 * autonegotiation, per bp->autoneg and bp->req_* settings.  Delegates to
 * bnx2_setup_remote_phy() when the PHY is firmware-managed.  Drops and
 * re-acquires bp->phy_lock around the msleep() used to make a link-down
 * visible to the link partner.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* 2.5G needs the UP1 bit enabled; dropping back to 1G needs
		 * it disabled.  Either transition forces a link bounce.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 appears to be a
				 * 5709-specific BMCR speed bit — no named
				 * constant available here.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	/* Restart autoneg only if the advertisement changed or autoneg
	 * was disabled; otherwise just re-resolve flow control.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* phy_lock must be released across the sleep. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1838
/* Ethtool ADVERTISED_* mask for fibre ports; includes 2.5G only when the
 * PHY is 2.5G-capable.  Evaluates "bp", so usable only where a local
 * struct bnx2 *bp is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* Ethtool ADVERTISED_* mask for copper ports: 10/100 half+full, 1G full. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits covering all 10/100 modes plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both 1G duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1853
1854static void
1855bnx2_set_default_remote_link(struct bnx2 *bp)
1856{
1857        u32 link;
1858
1859        if (bp->phy_port == PORT_TP)
1860                link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1861        else
1862                link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1863
1864        if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1865                bp->req_line_speed = 0;
1866                bp->autoneg |= AUTONEG_SPEED;
1867                bp->advertising = ADVERTISED_Autoneg;
1868                if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1869                        bp->advertising |= ADVERTISED_10baseT_Half;
1870                if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1871                        bp->advertising |= ADVERTISED_10baseT_Full;
1872                if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1873                        bp->advertising |= ADVERTISED_100baseT_Half;
1874                if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1875                        bp->advertising |= ADVERTISED_100baseT_Full;
1876                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1877                        bp->advertising |= ADVERTISED_1000baseT_Full;
1878                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1879                        bp->advertising |= ADVERTISED_2500baseX_Full;
1880        } else {
1881                bp->autoneg = 0;
1882                bp->advertising = 0;
1883                bp->req_duplex = DUPLEX_FULL;
1884                if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1885                        bp->req_line_speed = SPEED_10;
1886                        if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1887                                bp->req_duplex = DUPLEX_HALF;
1888                }
1889                if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1890                        bp->req_line_speed = SPEED_100;
1891                        if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1892                                bp->req_duplex = DUPLEX_HALF;
1893                }
1894                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1895                        bp->req_line_speed = SPEED_1000;
1896                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1897                        bp->req_line_speed = SPEED_2500;
1898        }
1899}
1900
1901static void
1902bnx2_set_default_link(struct bnx2 *bp)
1903{
1904        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1905                bnx2_set_default_remote_link(bp);
1906                return;
1907        }
1908
1909        bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1910        bp->req_line_speed = 0;
1911        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1912                u32 reg;
1913
1914                bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1915
1916                reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1917                reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1918                if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1919                        bp->autoneg = 0;
1920                        bp->req_line_speed = bp->line_speed = SPEED_1000;
1921                        bp->req_duplex = DUPLEX_FULL;
1922                }
1923        } else
1924                bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1925}
1926
/* Bump the driver pulse sequence number and write it to the firmware
 * pulse mailbox in shared memory, letting the bootcode know the driver
 * is alive.  The mailbox is reached through the PCICFG register window,
 * so indirect_lock is held across the address/data register pair.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	/* Sequence wraps within BNX2_DRV_PULSE_SEQ_MASK. */
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1940
/* Handle a link event from the firmware-managed (remote) PHY.  Decodes
 * the BNX2_LINK_STATUS shared-memory word into bp->link_up, line_speed,
 * duplex, flow_ctrl and phy_port, reports a link change if one occurred,
 * and reprograms the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, for change report */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware expects a heartbeat when it flags the pulse as expired. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case sets half duplex, then intentionally
		 * falls through to the matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: use the requested setting unless both speed
		 * and flow-control autoneg are enabled, in which case take
		 * the negotiated result from the firmware status word.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A media change (TP <-> fibre) resets the link defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2017
2018static int
2019bnx2_set_remote_link(struct bnx2 *bp)
2020{
2021        u32 evt_code;
2022
2023        evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2024        switch (evt_code) {
2025                case BNX2_FW_EVT_CODE_LINK_EVENT:
2026                        bnx2_remote_phy_event(bp);
2027                        break;
2028                case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2029                default:
2030                        bnx2_send_heart_beat(bp);
2031                        break;
2032        }
2033        return 0;
2034}
2035
/* Configure the copper PHY per bp->autoneg and bp->req_* settings.
 * In autoneg mode, the advertisement registers are rewritten and autoneg
 * restarted only when something actually changed.  In forced mode, an
 * active link is briefly forced down (phy_lock is dropped around the
 * sleep) before the new BMCR is written.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed and pause bits for comparison. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state (standard MII behavior).
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* phy_lock must be released across the sleep. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2134
2135static int
2136bnx2_setup_phy(struct bnx2 *bp, u8 port)
2137__releases(&bp->phy_lock)
2138__acquires(&bp->phy_lock)
2139{
2140        if (bp->loopback == MAC_LOOPBACK)
2141                return 0;
2142
2143        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2144                return bnx2_setup_serdes_phy(bp, port);
2145        }
2146        else {
2147                return bnx2_setup_copper_phy(bp);
2148        }
2149}
2150
/* One-time init of the 5709 SerDes PHY: map the shadow MII register
 * offsets, select the autoneg MMD, force fiber mode, set the 2.5G
 * advertisement per capability, and enable the BAM/CL73 next-page
 * autoneg extensions.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* The 5709 SerDes IEEE registers sit at a 0x10 offset. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AN MMD through the address expansion (AER) block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode; disable automatic medium detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM (Broadcom autoneg) next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE-B0 block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2200
/* One-time init of the 5708 SerDes PHY: fiber mode with auto-detect,
 * PLL early-detect, optional 2.5G advertisement, a TX amplitude fix for
 * early chip revisions, and an NVRAM-configured TX control value on
 * backplane cards.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY is capable. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Hardware workaround for A0/B0/B1 revisions only. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Nonzero TXCTL3 in the NVRAM port config is written to the PHY's
	 * TX control register, but only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2258
/* One-time init of the 5706 SerDes PHY.  Clears parallel-detect state
 * and programs vendor shadow registers 0x18/0x1c differently for jumbo
 * (MTU > 1500) vs. standard frames.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): 0x300 written to MISC_GP_HW_CTL0 is an unnamed
	 * 5706-only setting — presumably a hardware workaround; no named
	 * constant exists here to confirm.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2296
/* One-time init of the copper PHY: apply the CRC-fix and early-DAC
 * workarounds when the corresponding phy_flags are set, program the
 * extended packet length bits per MTU, and enable ethernet@wirespeed.
 * Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor shadow/expansion register sequence (regs 0x15/0x17/0x18)
	 * for the CRC workaround — values are opaque here.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expand register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2348
2349
2350static int
2351bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2352__releases(&bp->phy_lock)
2353__acquires(&bp->phy_lock)
2354{
2355        u32 val;
2356        int rc = 0;
2357
2358        bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2359        bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2360
2361        bp->mii_bmcr = MII_BMCR;
2362        bp->mii_bmsr = MII_BMSR;
2363        bp->mii_bmsr1 = MII_BMSR;
2364        bp->mii_adv = MII_ADVERTISE;
2365        bp->mii_lpa = MII_LPA;
2366
2367        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2368
2369        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2370                goto setup_phy;
2371
2372        bnx2_read_phy(bp, MII_PHYSID1, &val);
2373        bp->phy_id = val << 16;
2374        bnx2_read_phy(bp, MII_PHYSID2, &val);
2375        bp->phy_id |= val & 0xffff;
2376
2377        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2378                if (CHIP_NUM(bp) == CHIP_NUM_5706)
2379                        rc = bnx2_init_5706s_phy(bp, reset_phy);
2380                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2381                        rc = bnx2_init_5708s_phy(bp, reset_phy);
2382                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2383                        rc = bnx2_init_5709s_phy(bp, reset_phy);
2384        }
2385        else {
2386                rc = bnx2_init_copper_phy(bp, reset_phy);
2387        }
2388
2389setup_phy:
2390        if (!rc)
2391                rc = bnx2_setup_phy(bp, bp->phy_port);
2392
2393        return rc;
2394}
2395
2396static int
2397bnx2_set_mac_loopback(struct bnx2 *bp)
2398{
2399        u32 mac_mode;
2400
2401        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2402        mac_mode &= ~BNX2_EMAC_MODE_PORT;
2403        mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2404        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2405        bp->link_up = 1;
2406        return 0;
2407}
2408
2409static int bnx2_test_link(struct bnx2 *);
2410
/* Put the PHY into loopback at 1G full duplex and configure the EMAC for
 * GMII, for the ethtool PHY loopback self-test.  Polls up to ~1 second
 * for bnx2_test_link() to report the looped-back link.  Returns the PHY
 * write error, or 0.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Wait (10 x 100ms max) for the internal link to come up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear MAC loopback/force bits and select GMII port mode. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2440
/* Post @msg_data (tagged with the next driver sequence number) to the
 * firmware mailbox and, when @ack is set, poll up to
 * BNX2_FW_ACK_TIME_OUT_MS for the bootcode to echo the sequence back.
 *
 * Returns 0 on success or when no ack was requested, -EBUSY on ack
 * timeout (the timeout is also reported back to the firmware), or -EIO
 * when the firmware completion status is not OK.  @silent suppresses
 * the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not have their completion status checked. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2485
/* Initialize the 5709 on-chip context memory: kick off the context
 * block's memory init, wait for it to complete, then register each host
 * DMA context page in the chip's page table, polling each write request
 * until the hardware consumes it.
 *
 * Returns 0 on success, -EBUSY if the hardware does not respond in
 * time, or -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): bit 12 is an unnamed command flag kept from the
	 * original init value; the upper field encodes the page size
	 * relative to 256 bytes (BCM_PAGE_BITS - 8).
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait (10 x 2us max) for MEM_INIT to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program the 64-bit DMA address of page i, low word with
		 * the VALID bit, then high word, then trigger the write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll (10 x 5us max) for the write request to clear. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2533
/* Zero the on-chip context memory for all 96 connection IDs on
 * 5706/5708 chips (the 5709 keeps context in host memory instead,
 * see bnx2_init_5709_context).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        /* 5706 A0 quirk: CIDs with bit 3 set map to a
                         * remapped physical context location.
                         */
                        vcid_addr = GET_PCID_ADDR(vcid);
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* Each CID context may span several physical context
                 * pages; map and clear each in turn.
                 */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
                }
        }
}
2576
/* Work around bad on-chip RX buffer (mbuf) memory.
 *
 * Asks the RX buffer firmware interface to hand out every free mbuf,
 * records the handles of the good ones (bit 9 set marks a bad memory
 * block), then frees only the good ones back to the pool so the bad
 * blocks remain permanently allocated and are never used for RX.
 *
 * Returns 0 on success or -ENOMEM if the scratch array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
        u16 *good_mbuf;
        u32 good_mbuf_cnt;
        u32 val;

        /* Scratch space for up to 512 good mbuf handles. */
        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL) {
                pr_err("Failed to allocate memory in %s\n", __func__);
                return -ENOMEM;
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        good_mbuf_cnt = 0;

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
                                BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;
                        good_mbuf_cnt++;
                }

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        }

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones. */
        while (good_mbuf_cnt) {
                good_mbuf_cnt--;

                val = good_mbuf[good_mbuf_cnt];
                /* NOTE(review): the free command appears to pack the
                 * handle twice plus a valid bit — confirm against the
                 * RBUF firmware interface definition.
                 */
                val = (val << 9) | val | 1;

                bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
        }
        kfree(good_mbuf);
        return 0;
}
2627
2628static void
2629bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2630{
2631        u32 val;
2632
2633        val = (mac_addr[0] << 8) | mac_addr[1];
2634
2635        REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2636
2637        val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2638                (mac_addr[4] << 8) | mac_addr[5];
2639
2640        REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2641}
2642
2643static inline int
2644bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2645{
2646        dma_addr_t mapping;
2647        struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2648        struct rx_bd *rxbd =
2649                &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2650        struct page *page = alloc_page(gfp);
2651
2652        if (!page)
2653                return -ENOMEM;
2654        mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2655                               PCI_DMA_FROMDEVICE);
2656        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2657                __free_page(page);
2658                return -EIO;
2659        }
2660
2661        rx_pg->page = page;
2662        dma_unmap_addr_set(rx_pg, mapping, mapping);
2663        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2664        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2665        return 0;
2666}
2667
2668static void
2669bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2670{
2671        struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2672        struct page *page = rx_pg->page;
2673
2674        if (!page)
2675                return;
2676
2677        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2678                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2679
2680        __free_page(page);
2681        rx_pg->page = NULL;
2682}
2683
2684static inline int
2685bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2686{
2687        struct sk_buff *skb;
2688        struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2689        dma_addr_t mapping;
2690        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2691        unsigned long align;
2692
2693        skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
2694        if (skb == NULL) {
2695                return -ENOMEM;
2696        }
2697
2698        if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2699                skb_reserve(skb, BNX2_RX_ALIGN - align);
2700
2701        mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2702                                 PCI_DMA_FROMDEVICE);
2703        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2704                dev_kfree_skb(skb);
2705                return -EIO;
2706        }
2707
2708        rx_buf->skb = skb;
2709        rx_buf->desc = (struct l2_fhdr *) skb->data;
2710        dma_unmap_addr_set(rx_buf, mapping, mapping);
2711
2712        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2713        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2714
2715        rxr->rx_prod_bseq += bp->rx_buf_use_size;
2716
2717        return 0;
2718}
2719
2720static int
2721bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2722{
2723        struct status_block *sblk = bnapi->status_blk.msi;
2724        u32 new_link_state, old_link_state;
2725        int is_set = 1;
2726
2727        new_link_state = sblk->status_attn_bits & event;
2728        old_link_state = sblk->status_attn_bits_ack & event;
2729        if (new_link_state != old_link_state) {
2730                if (new_link_state)
2731                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2732                else
2733                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2734        } else
2735                is_set = 0;
2736
2737        return is_set;
2738}
2739
2740static void
2741bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2742{
2743        spin_lock(&bp->phy_lock);
2744
2745        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2746                bnx2_set_link(bp);
2747        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2748                bnx2_set_remote_link(bp);
2749
2750        spin_unlock(&bp->phy_lock);
2751
2752}
2753
2754static inline u16
2755bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2756{
2757        u16 cons;
2758
2759        /* Tell compiler that status block fields can change. */
2760        barrier();
2761        cons = *bnapi->hw_tx_cons_ptr;
2762        barrier();
2763        if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2764                cons++;
2765        return cons;
2766}
2767
/* Reclaim completed TX descriptors for the ring owned by @bnapi.
 *
 * Walks the TX ring from the driver's consumer index up to the
 * hardware consumer index, unmapping and freeing each completed skb,
 * then wakes the corresponding netdev TX queue if it was stopped and
 * enough descriptors are now free.  Returns the number of packets
 * reclaimed (at most @budget; a budget of 0 means unlimited here
 * since the count only stops the loop on equality).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
        struct netdev_queue *txq;

        /* One TX ring per NAPI instance; map it to its netdev queue. */
        index = (bnapi - bp->bnx2_napi);
        txq = netdev_get_tx_queue(bp->dev, index);

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
                prefetch(&skb->end);

                /* partial BD completions possible with TSO packets */
                if (tx_buf->is_gso) {
                        u16 last_idx, last_ring_idx;

                        /* Index one past this packet's last BD; account
                         * for the skipped last entry of a ring page.
                         */
                        last_idx = sw_cons + tx_buf->nr_frags + 1;
                        last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Signed 16-bit compare handles index wraparound:
                         * stop if the hardware has not completed every BD
                         * of this packet yet.
                         */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = tx_buf->nr_frags;

                /* Unmap each paged fragment of the skb. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        dma_unmap_page(&bp->pdev->dev,
                                dma_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                /* Caught up with the chip - refresh hw_cons in case more
                 * completions have arrived meanwhile.
                 */
                if (hw_cons == sw_cons)
                        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;

        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                /* Re-check under the TX lock to avoid racing with a
                 * concurrent transmit path stopping the queue.
                 */
                __netif_tx_lock(txq, smp_processor_id());
                if ((netif_tx_queue_stopped(txq)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        return tx_pkt;
}
2858
/* Recycle @count RX page-ring entries from the consumer side back to
 * the producer side without allocating new pages (used when a
 * replacement allocation failed or an errored frame is dropped).
 * If @skb is non-NULL, the page in its last frag is first returned to
 * the current consumer slot and the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        int i;
        u16 hw_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        cons_rx_pg = &rxr->rx_pg_ring[cons];

        /* The caller was unable to allocate a new page to replace the
         * last one in the frags array, so we need to recycle that page
         * and then free the skb.
         */
        if (skb) {
                struct page *page;
                struct skb_shared_info *shinfo;

                /* Detach the last frag's page and park it back in the
                 * consumer slot (its DMA mapping is still recorded there).
                 */
                shinfo = skb_shinfo(skb);
                shinfo->nr_frags--;
                page = shinfo->frags[shinfo->nr_frags].page;
                shinfo->frags[shinfo->nr_frags].page = NULL;

                cons_rx_pg->page = page;
                dev_kfree_skb(skb);
        }

        hw_prod = rxr->rx_pg_prod;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                /* Move the page, its DMA mapping and its BD address from
                 * the cons slot to the prod slot (a no-op when they are
                 * the same slot).
                 */
                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        dma_unmap_addr_set(prod_rx_pg, mapping,
                                dma_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
2914
/* Recycle the rx buffer at @cons by re-posting it (skb, DMA mapping
 * and buffer-descriptor address) at @prod, so no new allocation or
 * DMA mapping is needed.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                  struct sk_buff *skb, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        /* Hand the header area (synced to the CPU by the rx path)
         * back to the device before reposting the buffer.
         */
        dma_sync_single_for_device(&bp->pdev->dev,
                dma_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;
        prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

        /* Same slot: mapping and BD address are already in place. */
        if (cons == prod)
                return;

        dma_unmap_addr_set(prod_rx_buf, mapping,
                        dma_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2945
/* Finish receiving a packet into @skb and replenish its ring slot.
 *
 * @len:      packet length with the 4-byte CRC already trimmed by the
 *            caller; the hardware still wrote the CRC into the receive
 *            buffers, hence the "+ 4" adjustments below.
 * @hdr_len:  non-zero when the frame is split: the first @hdr_len
 *            bytes are in the skb's linear buffer and the remainder in
 *            page-ring pages.
 * @dma_addr: DMA mapping of the skb's linear buffer.
 * @ring_idx: consumer index in the high 16 bits, producer index in
 *            the low 16 bits.
 *
 * Returns 0 on success; a negative errno if a replacement buffer or
 * page could not be allocated, in which case the buffers are recycled
 * and the caller must drop the packet.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        /* Post a fresh skb at the producer slot before consuming this
         * one; on failure, recycle the current buffer (and pages).
         */
        err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, BNX2_RX_OFFSET);
        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Entire packet fits in the linear buffer. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                /* Bytes living in the page ring, including the CRC
                 * which is trimmed from the final fragment below.
                 */
                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        dma_addr_t mapping_old;

                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        if (unlikely(frag_len <= 4)) {
                                /* This page holds nothing but (part of)
                                 * the CRC: recycle the remaining pages
                                 * and trim the CRC bytes already counted
                                 * into the skb.
                                 */
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        /* Don't unmap yet.  If we're unable to allocate a new
                         * page, we need to recycle the page and the DMA addr.
                         */
                        mapping_old = dma_unmap_addr(rx_pg, mapping);
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod),
                                                 GFP_ATOMIC);
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return err;
                        }

                        dma_unmap_page(&bp->pdev->dev, mapping_old,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return 0;
}
3045
3046static inline u16
3047bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3048{
3049        u16 cons;
3050
3051        /* Tell compiler that status block fields can change. */
3052        barrier();
3053        cons = *bnapi->hw_rx_cons_ptr;
3054        barrier();
3055        if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3056                cons++;
3057        return cons;
3058}
3059
/* NAPI RX handler for the ring owned by @bnapi.
 *
 * Processes up to @budget completed RX descriptors: validates each
 * frame, either copies small frames into a fresh skb or hands the
 * buffer (plus any page-ring frags) up the stack, recycles buffers
 * on error/allocation failure, and finally tells the chip the new
 * producer indices.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
        sw_prod = rxr->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf, *next_rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;
                prefetchw(skb);

                /* Warm up the next descriptor's frame header. */
                next_rx_buf =
                        &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
                prefetch(next_rx_buf->desc);

                rx_buf->skb = NULL;

                dma_addr = dma_unmap_addr(rx_buf, mapping);

                /* Make the frame header (prepended by the chip at the
                 * start of the buffer) visible to the CPU.
                 */
                dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                rx_hdr = rx_buf->desc;
                len = rx_hdr->l2_fhdr_pkt_len;
                status = rx_hdr->l2_fhdr_status;

                /* hdr_len != 0 means the frame spills into the page ring. */
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* Drop errored frames, recycling their buffers/pages. */
                if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
                                       L2_FHDR_ERRORS_PHY_DECODE |
                                       L2_FHDR_ERRORS_ALIGNMENT |
                                       L2_FHDR_ERRORS_TOO_SHORT |
                                       L2_FHDR_ERRORS_GIANT_FRAME))) {

                        bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                          sw_ring_prod);
                        if (pg_ring_used) {
                                int pages;

                                pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                        }
                        goto next_rx;
                }

                /* Strip the trailing 4-byte CRC from the reported length. */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        /* Small frame: copy into a fresh skb and recycle
                         * the original buffer.
                         */
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 6);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
                                                         BNX2_RX_OFFSET - 6,
                                      new_skb->data, len + 6);
                        skb_reserve(new_skb, 6);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, rxr, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
                        __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversized frames unless they carry a VLAN tag
                 * (0x8100 = ETH_P_8021Q).
                 */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Trust the hardware checksum only when no checksum
                 * errors were flagged.
                 */
                skb_checksum_none_assert(skb);
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
                if ((bp->dev->features & NETIF_F_RXHASH) &&
                    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
                     L2_FHDR_STATUS_USE_RXHASH))
                        skb->rxhash = rx_hdr->l2_fhdr_hash;

                skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
                napi_gro_receive(&bnapi->napi, skb);
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        rxr->rx_cons = sw_cons;
        rxr->rx_prod = sw_prod;

        /* Publish the new producer indices and byte sequence to the chip. */
        if (pg_ring_used)
                REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

        REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
3220
3221/* MSI ISR - The only difference between this and the INTx ISR
3222 * is that the MSI interrupt is always serviced.
3223 */
3224static irqreturn_t
3225bnx2_msi(int irq, void *dev_instance)
3226{
3227        struct bnx2_napi *bnapi = dev_instance;
3228        struct bnx2 *bp = bnapi->bp;
3229
3230        prefetch(bnapi->status_blk.msi);
3231        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3232                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3233                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3234
3235        /* Return here if interrupt is disabled. */
3236        if (unlikely(atomic_read(&bp->intr_sem) != 0))
3237                return IRQ_HANDLED;
3238
3239        napi_schedule(&bnapi->napi);
3240
3241        return IRQ_HANDLED;
3242}
3243
3244static irqreturn_t
3245bnx2_msi_1shot(int irq, void *dev_instance)
3246{
3247        struct bnx2_napi *bnapi = dev_instance;
3248        struct bnx2 *bp = bnapi->bp;
3249
3250        prefetch(bnapi->status_blk.msi);
3251
3252        /* Return here if interrupt is disabled. */
3253        if (unlikely(atomic_read(&bp->intr_sem) != 0))
3254                return IRQ_HANDLED;
3255
3256        napi_schedule(&bnapi->napi);
3257
3258        return IRQ_HANDLED;
3259}
3260
/* INTx (possibly shared) interrupt handler.  Verifies the interrupt
 * really belongs to this device, acks and masks it, and schedules the
 * NAPI poll.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        /* Record which status block update is being serviced before
         * scheduling the poll.
         */
        if (napi_schedule_prep(&bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __napi_schedule(&bnapi->napi);
        }

        return IRQ_HANDLED;
}
3299
3300static inline int
3301bnx2_has_fast_work(struct bnx2_napi *bnapi)
3302{
3303        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3304        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3305
3306        if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3307            (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3308                return 1;
3309        return 0;
3310}
3311
/* Attention events serviced by bnx2_poll_link(). */
#define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
                                 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return 1 if there is any reason to run the NAPI poll: fast-path
 * RX/TX work, a pending CNIC status update, or an unacknowledged
 * link/timer attention event.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;

        if (bnx2_has_fast_work(bnapi))
                return 1;

#ifdef BCM_CNIC
        if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
                return 1;
#endif

        /* Attention bits differing from their ack bits means an
         * unserviced event.
         */
        if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
            (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
                return 1;

        return 0;
}
3334
/* Workaround for lost MSI interrupts.  If work is pending but the
 * status index has not advanced since the last idle check, assume the
 * MSI was missed: toggle the MSI enable bit off and back on, then run
 * the MSI handler by hand.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        u32 msi_ctrl;

        if (bnx2_has_work(bnapi)) {
                msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
                /* Nothing to do if MSI is not in use. */
                if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
                        return;

                if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                               ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
                        bnx2_msi(bp->irq_tbl[0].vector, bnapi);
                }
        }

        /* Remember the index for the next idle check. */
        bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3356
#ifdef BCM_CNIC
/* Pass the status block to the registered CNIC driver, if any, and
 * record the status index tag it returns as processed.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* bp->cnic_ops is RCU-protected; hold the read lock across
	 * the handler call.
	 */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3373
/* Check for pending attention (link/timer-abort) events and service
 * them via bnx2_phy_int().
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Event pending when bits differ from their ack'ed copies. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3393
/* Service TX completions and RX packets for one NAPI instance.
 * TX reclaim does not count against @budget; only RX work does.
 * Returns the updated running total of work done.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Reclaim completed TX buffers first. */
	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	/* Then process received packets within the remaining budget. */
	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3408
/* NAPI poll handler for MSI-X vectors.  Each vector has its own
 * status block and handles only fast-path (RX/TX) work.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* All done: re-enable this vector's interrupt,
			 * acknowledging the last status index handled.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3435
/* NAPI poll handler for INTx/MSI: services link events, fast-path
 * RX/TX work, and CNIC work (if compiled in) off one shared status
 * block.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			/* MSI/MSI-X: a single ack write re-enables the
			 * interrupt.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack in two steps — first with the
			 * interrupt masked, then unmasked.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3484
3485/* Called with rtnl_lock from vlan functions and also netif_tx_lock
3486 * from set_multicast.
3487 */
/* Program the chip's RX filtering (promiscuous/multicast/unicast)
 * from the net_device flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock also serializes access to the RX mode registers. */
	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep VLAN tags in hardware only when RX VLAN acceleration is
	 * off and the firmware allows it.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash with all ones. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the 256-bit filter:
		 * low CRC byte selects register (top 3 bits) and bit
		 * position (low 5 bits).
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for perfect filtering: fall back
	 * to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Load the new sort mode: clear, program, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3573
3574static int __devinit
3575check_fw_section(const struct firmware *fw,
3576                 const struct bnx2_fw_file_section *section,
3577                 u32 alignment, bool non_empty)
3578{
3579        u32 offset = be32_to_cpu(section->offset);
3580        u32 len = be32_to_cpu(section->len);
3581
3582        if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3583                return -EINVAL;
3584        if ((non_empty && len == 0) || len > fw->size - offset ||
3585            len & (alignment - 1))
3586                return -EINVAL;
3587        return 0;
3588}
3589
3590static int __devinit
3591check_mips_fw_entry(const struct firmware *fw,
3592                    const struct bnx2_mips_fw_file_entry *entry)
3593{
3594        if (check_fw_section(fw, &entry->text, 4, true) ||
3595            check_fw_section(fw, &entry->data, 4, false) ||
3596            check_fw_section(fw, &entry->rodata, 4, false))
3597                return -EINVAL;
3598        return 0;
3599}
3600
3601static int __devinit
3602bnx2_request_firmware(struct bnx2 *bp)
3603{
3604        const char *mips_fw_file, *rv2p_fw_file;
3605        const struct bnx2_mips_fw_file *mips_fw;
3606        const struct bnx2_rv2p_fw_file *rv2p_fw;
3607        int rc;
3608
3609        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3610                mips_fw_file = FW_MIPS_FILE_09;
3611                if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3612                    (CHIP_ID(bp) == CHIP_ID_5709_A1))
3613                        rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3614                else
3615                        rv2p_fw_file = FW_RV2P_FILE_09;
3616        } else {
3617                mips_fw_file = FW_MIPS_FILE_06;
3618                rv2p_fw_file = FW_RV2P_FILE_06;
3619        }
3620
3621        rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3622        if (rc) {
3623                pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3624                return rc;
3625        }
3626
3627        rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3628        if (rc) {
3629                pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3630                return rc;
3631        }
3632        mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3633        rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3634        if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3635            check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3636            check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3637            check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3638            check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3639            check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3640                pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3641                return -EINVAL;
3642        }
3643        if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3644            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3645            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3646                pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3647                return -EINVAL;
3648        }
3649
3650        return 0;
3651}
3652
3653static u32
3654rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3655{
3656        switch (idx) {
3657        case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3658                rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3659                rv2p_code |= RV2P_BD_PAGE_SIZE;
3660                break;
3661        }
3662        return rv2p_code;
3663}
3664
/* Download one RV2P processor's code image, apply its fixup table,
 * then reset the processor (the un-stall happens later during chip
 * init).  Each instruction is 64 bits, written via the INSTR_HIGH /
 * INSTR_LOW register pair followed by an address/command write.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for this processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the image one 8-byte instruction at a time. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply up to 8 fixups.  Each entry is a word index into the
	 * image; 0 means the slot is unused.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			/* Rewrite the instruction containing the fixup:
			 * high half unchanged, low half patched.
			 */
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3724
3725static int
3726load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3727            const struct bnx2_mips_fw_file_entry *fw_entry)
3728{
3729        u32 addr, len, file_offset;
3730        __be32 *data;
3731        u32 offset;
3732        u32 val;
3733
3734        /* Halt the CPU. */
3735        val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3736        val |= cpu_reg->mode_value_halt;
3737        bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3738        bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3739
3740        /* Load the Text area. */
3741        addr = be32_to_cpu(fw_entry->text.addr);
3742        len = be32_to_cpu(fw_entry->text.len);
3743        file_offset = be32_to_cpu(fw_entry->text.offset);
3744        data = (__be32 *)(bp->mips_firmware->data + file_offset);
3745
3746        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3747        if (len) {
3748                int j;
3749
3750                for (j = 0; j < (len / 4); j++, offset += 4)
3751                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3752        }
3753
3754        /* Load the Data area. */
3755        addr = be32_to_cpu(fw_entry->data.addr);
3756        len = be32_to_cpu(fw_entry->data.len);
3757        file_offset = be32_to_cpu(fw_entry->data.offset);
3758        data = (__be32 *)(bp->mips_firmware->data + file_offset);
3759
3760        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3761        if (len) {
3762                int j;
3763
3764                for (j = 0; j < (len / 4); j++, offset += 4)
3765                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3766        }
3767
3768        /* Load the Read-Only area. */
3769        addr = be32_to_cpu(fw_entry->rodata.addr);
3770        len = be32_to_cpu(fw_entry->rodata.len);
3771        file_offset = be32_to_cpu(fw_entry->rodata.offset);
3772        data = (__be32 *)(bp->mips_firmware->data + file_offset);
3773
3774        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3775        if (len) {
3776                int j;
3777
3778                for (j = 0; j < (len / 4); j++, offset += 4)
3779                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3780        }
3781
3782        /* Clear the pre-fetch instruction. */
3783        bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3784
3785        val = be32_to_cpu(fw_entry->start_addr);
3786        bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3787
3788        /* Start the CPU. */
3789        val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3790        val &= ~cpu_reg->mode_value_halt;
3791        bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3792        bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3793
3794        return 0;
3795}
3796
3797static int
3798bnx2_init_cpus(struct bnx2 *bp)
3799{
3800        const struct bnx2_mips_fw_file *mips_fw =
3801                (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3802        const struct bnx2_rv2p_fw_file *rv2p_fw =
3803                (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3804        int rc;
3805
3806        /* Initialize the RV2P processor. */
3807        load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3808        load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3809
3810        /* Initialize the RX Processor. */
3811        rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3812        if (rc)
3813                goto init_cpu_err;
3814
3815        /* Initialize the TX Processor. */
3816        rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3817        if (rc)
3818                goto init_cpu_err;
3819
3820        /* Initialize the TX Patch-up Processor. */
3821        rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3822        if (rc)
3823                goto init_cpu_err;
3824
3825        /* Initialize the Completion Processor. */
3826        rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3827        if (rc)
3828                goto init_cpu_err;
3829
3830        /* Initialize the Command Processor. */
3831        rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3832
3833init_cpu_err:
3834        return rc;
3835}
3836
/* Move the device between PCI power states D0 and D3hot, configuring
 * Wake-on-LAN when entering D3hot with WoL enabled.  Returns 0 on
 * success or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (back to D0) and write
		 * PME_STATUS=1 to clear any pending wake event.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packets and leave
		 * magic-packet detection disabled in D0.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so
			 * the link stays up at low power for WoL, then
			 * restore the user settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort mode: broadcast + multicast.  Program the
			 * register in the clear / set / enable sequence.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending (with or without
		 * WoL), unless WoL is entirely unsupported.
		 */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* Select D3hot in PMCSR.  On 5706 A0/A1 only enter D3hot
		 * when WoL is requested; otherwise stay in D0.
		 * NOTE(review): presumably a chip erratum on those
		 * steppings — confirm against the errata sheet.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3974
3975static int
3976bnx2_acquire_nvram_lock(struct bnx2 *bp)
3977{
3978        u32 val;
3979        int j;
3980
3981        /* Request access to the flash interface. */
3982        REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3983        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3984                val = REG_RD(bp, BNX2_NVM_SW_ARB);
3985                if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3986                        break;
3987
3988                udelay(5);
3989        }
3990
3991        if (j >= NVRAM_TIMEOUT_COUNT)
3992                return -EBUSY;
3993
3994        return 0;
3995}
3996
3997static int
3998bnx2_release_nvram_lock(struct bnx2 *bp)
3999{
4000        int j;
4001        u32 val;
4002
4003        /* Relinquish nvram interface. */
4004        REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4005
4006        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4007                val = REG_RD(bp, BNX2_NVM_SW_ARB);
4008                if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4009                        break;
4010
4011                udelay(5);
4012        }
4013
4014        if (j >= NVRAM_TIMEOUT_COUNT)
4015                return -EBUSY;
4016
4017        return 0;
4018}
4019
4020
4021static int
4022bnx2_enable_nvram_write(struct bnx2 *bp)
4023{
4024        u32 val;
4025
4026        val = REG_RD(bp, BNX2_MISC_CFG);
4027        REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4028
4029        if (bp->flash_info->flags & BNX2_NV_WREN) {
4030                int j;
4031
4032                REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4033                REG_WR(bp, BNX2_NVM_COMMAND,
4034                       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4035
4036                for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4037                        udelay(5);
4038
4039                        val = REG_RD(bp, BNX2_NVM_COMMAND);
4040                        if (val & BNX2_NVM_COMMAND_DONE)
4041                                break;
4042                }
4043
4044                if (j >= NVRAM_TIMEOUT_COUNT)
4045                        return -EBUSY;
4046        }
4047        return 0;
4048}
4049
4050static void
4051bnx2_disable_nvram_write(struct bnx2 *bp)
4052{
4053        u32 val;
4054
4055        val = REG_RD(bp, BNX2_MISC_CFG);
4056        REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4057}
4058
4059
4060static void
4061bnx2_enable_nvram_access(struct bnx2 *bp)
4062{
4063        u32 val;
4064
4065        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4066        /* Enable both bits, even on read. */
4067        REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4068               val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4069}
4070
4071static void
4072bnx2_disable_nvram_access(struct bnx2 *bp)
4073{
4074        u32 val;
4075
4076        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4077        /* Disable both bits, even after read. */
4078        REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4079                val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4080                        BNX2_NVM_ACCESS_ENABLE_WR_EN));
4081}
4082
4083static int
4084bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4085{
4086        u32 cmd;
4087        int j;
4088
4089        if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4090                /* Buffered flash, no erase needed */
4091                return 0;
4092
4093        /* Build an erase command */
4094        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4095              BNX2_NVM_COMMAND_DOIT;
4096
4097        /* Need to clear DONE bit separately. */
4098        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4099
4100        /* Address of the NVRAM to read from. */
4101        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4102
4103        /* Issue an erase command. */
4104        REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4105
4106        /* Wait for completion. */
4107        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4108                u32 val;
4109
4110                udelay(5);
4111
4112                val = REG_RD(bp, BNX2_NVM_COMMAND);
4113                if (val & BNX2_NVM_COMMAND_DONE)
4114                        break;
4115        }
4116
4117        if (j >= NVRAM_TIMEOUT_COUNT)
4118                return -EBUSY;
4119
4120        return 0;
4121}
4122
4123static int
4124bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4125{
4126        u32 cmd;
4127        int j;
4128
4129        /* Build the command word. */
4130        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4131
4132        /* Calculate an offset of a buffered flash, not needed for 5709. */
4133        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4134                offset = ((offset / bp->flash_info->page_size) <<
4135                           bp->flash_info->page_bits) +
4136                          (offset % bp->flash_info->page_size);
4137        }
4138
4139        /* Need to clear DONE bit separately. */
4140        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4141
4142        /* Address of the NVRAM to read from. */
4143        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4144
4145        /* Issue a read command. */
4146        REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4147
4148        /* Wait for completion. */
4149        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4150                u32 val;
4151
4152                udelay(5);
4153
4154                val = REG_RD(bp, BNX2_NVM_COMMAND);
4155                if (val & BNX2_NVM_COMMAND_DONE) {
4156                        __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4157                        memcpy(ret_val, &v, 4);
4158                        break;
4159                }
4160        }
4161        if (j >= NVRAM_TIMEOUT_COUNT)
4162                return -EBUSY;
4163
4164        return 0;
4165}
4166
4167
4168static int
4169bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4170{
4171        u32 cmd;
4172        __be32 val32;
4173        int j;
4174
4175        /* Build the command word. */
4176        cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4177
4178        /* Calculate an offset of a buffered flash, not needed for 5709. */
4179        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4180                offset = ((offset / bp->flash_info->page_size) <<
4181                          bp->flash_info->page_bits) +
4182                         (offset % bp->flash_info->page_size);
4183        }
4184
4185        /* Need to clear DONE bit separately. */
4186        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4187
4188        memcpy(&val32, val, 4);
4189
4190        /* Write the data. */
4191        REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4192
4193        /* Address of the NVRAM to write to. */
4194        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4195
4196        /* Issue the write command. */
4197        REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4198
4199        /* Wait for completion. */
4200        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4201                udelay(5);
4202
4203                if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4204                        break;
4205        }
4206        if (j >= NVRAM_TIMEOUT_COUNT)
4207                return -EBUSY;
4208
4209        return 0;
4210}
4211
/* Identify the NVRAM (flash/EEPROM) part attached to the chip and point
 * bp->flash_info at the matching flash_table entry.  On the 5709 the flash
 * type is fixed (flash_5709), so only the size probe at the end is needed.
 * On older chips the strapping pins reported in BNX2_NVM_CFG1 select the
 * device; if the interface has not been reconfigured yet, the matching
 * entry's config values are programmed into the NVM_CFG registers.
 * Returns 0 on success, -ENODEV if no table entry matches, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of the already
			 * programmed config1 value. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap encoding applies. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either scan leaves j == entry_count when nothing matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVRAM size reported by the bootcode in shared memory;
	 * fall back to the table's total_size if it reports zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4294
4295static int
4296bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4297                int buf_size)
4298{
4299        int rc = 0;
4300        u32 cmd_flags, offset32, len32, extra;
4301
4302        if (buf_size == 0)
4303                return 0;
4304
4305        /* Request access to the flash interface. */
4306        if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4307                return rc;
4308
4309        /* Enable access to flash interface */
4310        bnx2_enable_nvram_access(bp);
4311
4312        len32 = buf_size;
4313        offset32 = offset;
4314        extra = 0;
4315
4316        cmd_flags = 0;
4317
4318        if (offset32 & 3) {
4319                u8 buf[4];
4320                u32 pre_len;
4321
4322                offset32 &= ~3;
4323                pre_len = 4 - (offset & 3);
4324
4325                if (pre_len >= len32) {
4326                        pre_len = len32;
4327                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
4328                                    BNX2_NVM_COMMAND_LAST;
4329                }
4330                else {
4331                        cmd_flags = BNX2_NVM_COMMAND_FIRST;
4332                }
4333
4334                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4335
4336                if (rc)
4337                        return rc;
4338
4339                memcpy(ret_buf, buf + (offset & 3), pre_len);
4340
4341                offset32 += 4;
4342                ret_buf += pre_len;
4343                len32 -= pre_len;
4344        }
4345        if (len32 & 3) {
4346                extra = 4 - (len32 & 3);
4347                len32 = (len32 + 4) & ~3;
4348        }
4349
4350        if (len32 == 4) {
4351                u8 buf[4];
4352
4353                if (cmd_flags)
4354                        cmd_flags = BNX2_NVM_COMMAND_LAST;
4355                else
4356                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
4357                                    BNX2_NVM_COMMAND_LAST;
4358
4359                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4360
4361                memcpy(ret_buf, buf, 4 - extra);
4362        }
4363        else if (len32 > 0) {
4364                u8 buf[4];
4365
4366                /* Read the first word. */
4367                if (cmd_flags)
4368                        cmd_flags = 0;
4369                else
4370                        cmd_flags = BNX2_NVM_COMMAND_FIRST;
4371
4372                rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4373
4374                /* Advance to the next dword. */
4375                offset32 += 4;
4376                ret_buf += 4;
4377                len32 -= 4;
4378
4379                while (len32 > 4 && rc == 0) {
4380                        rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4381
4382                        /* Advance to the next dword. */
4383                        offset32 += 4;
4384                        ret_buf += 4;
4385                        len32 -= 4;
4386                }
4387
4388                if (rc)
4389                        return rc;
4390
4391                cmd_flags = BNX2_NVM_COMMAND_LAST;
4392                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4393
4394                memcpy(ret_buf, buf, 4 - extra);
4395        }
4396
4397        /* Disable access to flash interface */
4398        bnx2_disable_nvram_access(bp);
4399
4400        bnx2_release_nvram_lock(bp);
4401
4402        return rc;
4403}
4404
4405static int
4406bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4407                int buf_size)
4408{
4409        u32 written, offset32, len32;
4410        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4411        int rc = 0;
4412        int align_start, align_end;
4413
4414        buf = data_buf;
4415        offset32 = offset;
4416        len32 = buf_size;
4417        align_start = align_end = 0;
4418
4419        if ((align_start = (offset32 & 3))) {
4420                offset32 &= ~3;
4421                len32 += align_start;
4422                if (len32 < 4)
4423                        len32 = 4;
4424                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4425                        return rc;
4426        }
4427
4428        if (len32 & 3) {
4429                align_end = 4 - (len32 & 3);
4430                len32 += align_end;
4431                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4432                        return rc;
4433        }
4434
4435        if (align_start || align_end) {
4436                align_buf = kmalloc(len32, GFP_KERNEL);
4437                if (align_buf == NULL)
4438                        return -ENOMEM;
4439                if (align_start) {
4440                        memcpy(align_buf, start, 4);
4441                }
4442                if (align_end) {
4443                        memcpy(align_buf + len32 - 4, end, 4);
4444                }
4445                memcpy(align_buf + align_start, data_buf, buf_size);
4446                buf = align_buf;
4447        }
4448
4449        if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4450                flash_buffer = kmalloc(264, GFP_KERNEL);
4451                if (flash_buffer == NULL) {
4452                        rc = -ENOMEM;
4453                        goto nvram_write_end;
4454                }
4455        }
4456
4457        written = 0;
4458        while ((written < len32) && (rc == 0)) {
4459                u32 page_start, page_end, data_start, data_end;
4460                u32 addr, cmd_flags;
4461                int i;
4462
4463                /* Find the page_start addr */
4464                page_start = offset32 + written;
4465                page_start -= (page_start % bp->flash_info->page_size);
4466                /* Find the page_end addr */
4467                page_end = page_start + bp->flash_info->page_size;
4468                /* Find the data_start addr */
4469                data_start = (written == 0) ? offset32 : page_start;
4470                /* Find the data_end addr */
4471                data_end = (page_end > offset32 + len32) ?
4472                        (offset32 + len32) : page_end;
4473
4474                /* Request access to the flash interface. */
4475                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4476                        goto nvram_write_end;
4477
4478                /* Enable access to flash interface */
4479                bnx2_enable_nvram_access(bp);
4480
4481                cmd_flags = BNX2_NVM_COMMAND_FIRST;
4482                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4483                        int j;
4484
4485                        /* Read the whole page into the buffer
4486                         * (non-buffer flash only) */
4487                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
4488                                if (j == (bp->flash_info->page_size - 4)) {
4489                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
4490                                }
4491                                rc = bnx2_nvram_read_dword(bp,
4492                                        page_start + j,
4493                                        &flash_buffer[j],
4494                                        cmd_flags);
4495
4496                                if (rc)
4497                                        goto nvram_write_end;
4498
4499                                cmd_flags = 0;
4500                        }
4501                }
4502
4503                /* Enable writes to flash interface (unlock write-protect) */
4504                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4505                        goto nvram_write_end;
4506
4507                /* Loop to write back the buffer data from page_start to
4508                 * data_start */
4509                i = 0;
4510                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4511                        /* Erase the page */
4512                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4513                                goto nvram_write_end;
4514
4515                        /* Re-enable the write again for the actual write */
4516                        bnx2_enable_nvram_write(bp);
4517
4518                        for (addr = page_start; addr < data_start;
4519                                addr += 4, i += 4) {
4520
4521                                rc = bnx2_nvram_write_dword(bp, addr,
4522                                        &flash_buffer[i], cmd_flags);
4523
4524                                if (rc != 0)
4525                                        goto nvram_write_end;
4526
4527                                cmd_flags = 0;
4528                        }
4529                }
4530
4531                /* Loop to write the new data from data_start to data_end */
4532                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4533                        if ((addr == page_end - 4) ||
4534                                ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4535                                 (addr == data_end - 4))) {
4536
4537                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
4538                        }
4539                        rc = bnx2_nvram_write_dword(bp, addr, buf,
4540                                cmd_flags);
4541
4542                        if (rc != 0)
4543                                goto nvram_write_end;
4544
4545                        cmd_flags = 0;
4546                        buf += 4;
4547                }
4548
4549                /* Loop to write back the buffer data from data_end
4550                 * to page_end */
4551                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4552                        for (addr = data_end; addr < page_end;
4553                                addr += 4, i += 4) {
4554
4555                                if (addr == page_end-4) {
4556                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
4557                                }
4558                                rc = bnx2_nvram_write_dword(bp, addr,
4559                                        &flash_buffer[i], cmd_flags);
4560
4561                                if (rc != 0)
4562                                        goto nvram_write_end;
4563
4564                                cmd_flags = 0;
4565                        }
4566                }
4567
4568                /* Disable writes to flash interface (lock write-protect) */
4569                bnx2_disable_nvram_write(bp);
4570
4571                /* Disable access to flash interface */
4572                bnx2_disable_nvram_access(bp);
4573                bnx2_release_nvram_lock(bp);
4574
4575                /* Increment written */
4576                written += data_end - data_start;
4577        }
4578
4579nvram_write_end:
4580        kfree(flash_buffer);
4581        kfree(align_buf);
4582        return rc;
4583}
4584
4585static void
4586bnx2_init_fw_cap(struct bnx2 *bp)
4587{
4588        u32 val, sig = 0;
4589
4590        bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4591        bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4592
4593        if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4594                bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4595
4596        val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4597        if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4598                return;
4599
4600        if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4601                bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4602                sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4603        }
4604
4605        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4606            (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4607                u32 link;
4608
4609                bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4610
4611                link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4612                if (link & BNX2_LINK_STATUS_SERDES_LINK)
4613                        bp->phy_port = PORT_FIBRE;
4614                else
4615                        bp->phy_port = PORT_TP;
4616
4617                sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4618                       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4619        }
4620
4621        if (netif_running(bp->dev) && sig)
4622                bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4623}
4624
/* Point two GRC windows at the chip's MSI-X table and pending-bit array so
 * the host can reach them; must be redone after every chip reset when MSI-X
 * is in use. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Switch GRC window addressing into separate-window mode. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	/* Window 2 -> MSI-X table, window 3 -> MSI-X PBA. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4633
/* Perform a soft reset of the chip, coordinating with the bootcode via the
 * shared-memory firmware mailbox.  @reset_code tells the firmware why the
 * reset is happening (e.g. driver load, unload, diag).  The sequence is:
 * quiesce DMA, handshake WAIT0 with firmware, write the reset-signature,
 * issue the chip-specific reset, verify endian configuration, then
 * handshake WAIT1 and re-read firmware capabilities.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		/* 5706/5708: stop the DMA engines and host coalescing,
		 * then allow a short drain period. */
		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		/* Read back to flush the posted write before the delay. */
		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* 5709: clear the core DMA enable and poll PCI config
		 * space until no transactions are pending (up to ~100ms). */
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register; the window
		 * configuration must then be reprogrammed. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the remote-PHY port selection may
	 * have changed across the reset. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		/* GRC windows were lost in the reset; remap the MSI-X table. */
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4758
/* Bring the freshly-reset chip to an operational state: configure DMA and
 * context memory, load the internal CPU firmware, program MAC address, MTU
 * and host-coalescing parameters, set up per-vector status blocks, and
 * finally complete the WAIT2 handshake with the bootcode and enable all
 * blocks.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swap configuration plus channel counts; the CNTL
	 * byte swap is only needed on big-endian hosts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* NOTE(review): ping-pong DMA appears to be a 5706 (non-A0,
	 * non-PCI-X) workaround — confirm against Broadcom errata. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* 5706 A0 workaround: restrict TDMA to a single DMA. */
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	/* Mailbox queue: 256-byte kernel-bypass blocks; 5709 uses binary
	 * MQ mode and Ax revs need the halt-disable workaround. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* RV2P page size, encoded as log2(page) - 8 in bits 24+. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds are always computed for at least a 1500 MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the shared status/statistics block and per-vector indices. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing trip counts and tick timers; the high half of
	 * each register holds the interrupt-time value. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Tell firmware whether we are tuned for low-latency RX. */
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	/* Configure the additional MSI-X status blocks (vector 0 was
	 * covered by the registers above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* Re-enable core DMA (cleared during reset quiesce). */
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final handshake with the bootcode; chip is going live. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4988
4989static void
4990bnx2_clear_ring_states(struct bnx2 *bp)
4991{
4992        struct bnx2_napi *bnapi;
4993        struct bnx2_tx_ring_info *txr;
4994        struct bnx2_rx_ring_info *rxr;
4995        int i;
4996
4997        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4998                bnapi = &bp->bnx2_napi[i];
4999                txr = &bnapi->tx_ring;
5000                rxr = &bnapi->rx_ring;
5001
5002                txr->tx_cons = 0;
5003                txr->hw_tx_cons = 0;
5004                rxr->rx_prod_bseq = 0;
5005                rxr->rx_prod = 0;
5006                rxr->rx_cons = 0;
5007                rxr->rx_pg_prod = 0;
5008                rxr->rx_pg_cons = 0;
5009        }
5010}
5011
5012static void
5013bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5014{
5015        u32 val, offset0, offset1, offset2, offset3;
5016        u32 cid_addr = GET_CID_ADDR(cid);
5017
5018        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5019                offset0 = BNX2_L2CTX_TYPE_XI;
5020                offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5021                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5022                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5023        } else {
5024                offset0 = BNX2_L2CTX_TYPE;
5025                offset1 = BNX2_L2CTX_CMD_TYPE;
5026                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5027                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5028        }
5029        val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5030        bnx2_ctx_wr(bp, cid_addr, offset0, val);
5031
5032        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5033        bnx2_ctx_wr(bp, cid_addr, offset1, val);
5034
5035        val = (u64) txr->tx_desc_mapping >> 32;
5036        bnx2_ctx_wr(bp, cid_addr, offset2, val);
5037
5038        val = (u64) txr->tx_desc_mapping & 0xffffffff;
5039        bnx2_ctx_wr(bp, cid_addr, offset3, val);
5040}
5041
5042static void
5043bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5044{
5045        struct tx_bd *txbd;
5046        u32 cid = TX_CID;
5047        struct bnx2_napi *bnapi;
5048        struct bnx2_tx_ring_info *txr;
5049
5050        bnapi = &bp->bnx2_napi[ring_num];
5051        txr = &bnapi->tx_ring;
5052
5053        if (ring_num == 0)
5054                cid = TX_CID;
5055        else
5056                cid = TX_TSS_CID + ring_num - 1;
5057
5058        bp->tx_wake_thresh = bp->tx_ring_size / 2;
5059
5060        txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5061
5062        txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5063        txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5064
5065        txr->tx_prod = 0;
5066        txr->tx_prod_bseq = 0;
5067
5068        txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5069        txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5070
5071        bnx2_init_tx_context(bp, cid, txr);
5072}
5073
5074static void
5075bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5076                     int num_rings)
5077{
5078        int i;
5079        struct rx_bd *rxbd;
5080
5081        for (i = 0; i < num_rings; i++) {
5082                int j;
5083
5084                rxbd = &rx_ring[i][0];
5085                for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5086                        rxbd->rx_bd_len = buf_size;
5087                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5088                }
5089                if (i == (num_rings - 1))
5090                        j = 0;
5091                else
5092                        j = i + 1;
5093                rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5094                rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5095        }
5096}
5097
/* Initialize RX ring @ring_num: program the RX context, optionally set
 * up the page (jumbo) ring, pre-post receive buffers, and write the
 * initial producer indices to the hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX context; additional (RSS) rings use
	 * consecutive CIDs starting at RX_RSS_CID.
	 */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Fill every RX BD and chain the descriptor ring pages. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* A page-buffer size of 0 disables the page ring by default. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames enabled: set up the page (jumbo) BD ring
		 * and program its buffer sizes and base address.
		 */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Program the base address of the normal RX BD ring. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-post pages to the jumbo ring; a shortfall is logged but
	 * not fatal.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-post skbs to the normal RX ring. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for the producer index/bseq doorbells. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the hardware about the buffers just posted. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5183
/* Initialize every TX and RX ring and, when multiple RX rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS when more than one TX ring is configured. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table, 8 entries (4 bits
		 * each) per 32-bit word; write out one word every 8
		 * entries.  Entries cycle over the non-default rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Hash over all supported IPv4 and IPv6 fields. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5230
5231static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5232{
5233        u32 max, num_rings = 1;
5234
5235        while (ring_size > MAX_RX_DESC_CNT) {
5236                ring_size -= MAX_RX_DESC_CNT;
5237                num_rings++;
5238        }
5239        /* round to next power of 2 */
5240        max = max_size;
5241        while ((max & num_rings) == 0)
5242                max >>= 1;
5243
5244        if (num_rings != max)
5245                max <<= 1;
5246
5247        return max;
5248}
5249
/* Compute RX buffer sizes and ring geometry for the current MTU.  When
 * a buffer would exceed one page (and jumbo pages work on this chip),
 * switch to the split header/page scheme: small header buffers plus a
 * page ring for the payload.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint including alignment, head padding and
	 * the shared-info tail.
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per packet for the payload beyond the
		 * header buffer (the 40 accounts for header bytes kept
		 * in the skb).
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Header buffers only need to hold the copy threshold. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5288
/* Free every skb still queued on the TX rings, unmapping the head and
 * all fragment DMA mappings.  Used while resetting the NIC.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the loop body: by one for an empty
		 * slot, otherwise past the head BD plus one BD per
		 * fragment.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Unmap the linear (head) part of the skb. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Fragments occupy the following BDs;
			 * TX_RING_IDX handles wrap-around of j.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5332
5333static void
5334bnx2_free_rx_skbs(struct bnx2 *bp)
5335{
5336        int i;
5337
5338        for (i = 0; i < bp->num_rx_rings; i++) {
5339                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5340                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5341                int j;
5342
5343                if (rxr->rx_buf_ring == NULL)
5344                        return;
5345
5346                for (j = 0; j < bp->rx_max_ring_idx; j++) {
5347                        struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5348                        struct sk_buff *skb = rx_buf->skb;
5349
5350                        if (skb == NULL)
5351                                continue;
5352
5353                        dma_unmap_single(&bp->pdev->dev,
5354                                         dma_unmap_addr(rx_buf, mapping),
5355                                         bp->rx_buf_use_size,
5356                                         PCI_DMA_FROMDEVICE);
5357
5358                        rx_buf->skb = NULL;
5359
5360                        dev_kfree_skb(skb);
5361                }
5362                for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5363                        bnx2_free_rx_page(bp, rxr, j);
5364        }
5365}
5366
/* Free all TX and RX buffers owned by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5373
5374static int
5375bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5376{
5377        int rc;
5378
5379        rc = bnx2_reset_chip(bp, reset_code);
5380        bnx2_free_skbs(bp);
5381        if (rc)
5382                return rc;
5383
5384        if ((rc = bnx2_init_chip(bp)) != 0)
5385                return rc;
5386
5387        bnx2_init_all_rings(bp);
5388        return 0;
5389}
5390
5391static int
5392bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5393{
5394        int rc;
5395
5396        if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5397                return rc;
5398
5399        spin_lock_bh(&bp->phy_lock);
5400        bnx2_init_phy(bp, reset_phy);
5401        bnx2_set_link(bp);
5402        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5403                bnx2_remote_phy_event(bp);
5404        spin_unlock_bh(&bp->phy_lock);
5405        return 0;
5406}
5407
5408static int
5409bnx2_shutdown_chip(struct bnx2 *bp)
5410{
5411        u32 reset_code;
5412
5413        if (bp->flags & BNX2_FLAG_NO_WOL)
5414                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5415        else if (bp->wol)
5416                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5417        else
5418                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5419
5420        return bnx2_reset_chip(bp, reset_code);
5421}
5422
/* Ethtool self-test: walk a table of registers and verify that the
 * read/write bits (rw_mask) accept 0 and all-ones, while the read-only
 * bits (ro_mask) are unaffected by writes.  Each register is restored
 * afterwards.  Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset: BAR offset of the register; flags: BNX2_FL_NOT_5709
	 * marks registers absent on the 5709; rw_mask: bits that must
	 * be writable; ro_mask: bits that must be read-only.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* 0xffff terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Remember the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must read back 0, ro bits must keep
		 * their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back as set, ro
		 * bits must still keep their saved value.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5593
5594static int
5595bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5596{
5597        static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5598                0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5599        int i;
5600
5601        for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5602                u32 offset;
5603
5604                for (offset = 0; offset < size; offset += 4) {
5605
5606                        bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5607
5608                        if (bnx2_reg_rd_ind(bp, start + offset) !=
5609                                test_pattern[i]) {
5610                                return -ENODEV;
5611                        }
5612                }
5613        }
5614        return 0;
5615}
5616
5617static int
5618bnx2_test_memory(struct bnx2 *bp)
5619{
5620        int ret = 0;
5621        int i;
5622        static struct mem_entry {
5623                u32   offset;
5624                u32   len;
5625        } mem_tbl_5706[] = {
5626                { 0x60000,  0x4000 },
5627                { 0xa0000,  0x3000 },
5628                { 0xe0000,  0x4000 },
5629                { 0x120000, 0x4000 },
5630                { 0x1a0000, 0x4000 },
5631                { 0x160000, 0x4000 },
5632                { 0xffffffff, 0    },
5633        },
5634        mem_tbl_5709[] = {
5635                { 0x60000,  0x4000 },
5636                { 0xa0000,  0x3000 },
5637                { 0xe0000,  0x4000 },
5638                { 0x120000, 0x4000 },
5639                { 0x1a0000, 0x4000 },
5640                { 0xffffffff, 0    },
5641        };
5642        struct mem_entry *mem_tbl;
5643
5644        if (CHIP_NUM(bp) == CHIP_NUM_5709)
5645                mem_tbl = mem_tbl_5709;
5646        else
5647                mem_tbl = mem_tbl_5706;
5648
5649        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5650                if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5651                        mem_tbl[i].len)) != 0) {
5652                        return ret;
5653                }
5654        }
5655
5656        return ret;
5657}
5658
5659#define BNX2_MAC_LOOPBACK       0
5660#define BNX2_PHY_LOOPBACK       1
5661
/* Ethtool loopback self-test: transmit one self-addressed test packet
 * in MAC or PHY loopback mode and verify it comes back intact on the
 * RX ring.  Returns 0 on success, -ENODEV on any mismatch, -EINVAL for
 * an unknown mode, -ENOMEM/-EIO on allocation/mapping failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always runs on ring 0. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback is not possible with a remote PHY. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: own MAC as destination, zeroed type/len
	 * area, then an incrementing byte pattern in the payload.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status-block update so rx_start_idx is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand the packet to the hardware on a single TX BD and ring
	 * the doorbell.
	 */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the packet time to loop back, then force another
	 * status-block update to pick up the new indices.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX consumer must have caught up with the producer. */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* Exactly num_pkts packets must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the hardware flagged as bad. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match (minus 4-byte CRC). */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5793
5794#define BNX2_MAC_LOOPBACK_FAILED        1
5795#define BNX2_PHY_LOOPBACK_FAILED        2
5796#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5797                                         BNX2_PHY_LOOPBACK_FAILED)
5798
5799static int
5800bnx2_test_loopback(struct bnx2 *bp)
5801{
5802        int rc = 0;
5803
5804        if (!netif_running(bp->dev))
5805                return BNX2_LOOPBACK_FAILED;
5806
5807        bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5808        spin_lock_bh(&bp->phy_lock);
5809        bnx2_init_phy(bp, 1);
5810        spin_unlock_bh(&bp->phy_lock);
5811        if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5812                rc |= BNX2_MAC_LOOPBACK_FAILED;
5813        if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5814                rc |= BNX2_PHY_LOOPBACK_FAILED;
5815        return rc;
5816}
5817
5818#define NVRAM_SIZE 0x200
5819#define CRC32_RESIDUAL 0xdebb20e3
5820
5821static int
5822bnx2_test_nvram(struct bnx2 *bp)
5823{
5824        __be32 buf[NVRAM_SIZE / 4];
5825        u8 *data = (u8 *) buf;
5826        int rc = 0;
5827        u32 magic, csum;
5828
5829        if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5830                goto test_nvram_done;
5831
5832        magic = be32_to_cpu(buf[0]);
5833        if (magic != 0x669955aa) {
5834                rc = -ENODEV;
5835                goto test_nvram_done;
5836        }
5837
5838        if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5839                goto test_nvram_done;
5840
5841        csum = ether_crc_le(0x100, data);
5842        if (csum != CRC32_RESIDUAL) {
5843                rc = -ENODEV;
5844                goto test_nvram_done;
5845        }
5846
5847        csum = ether_crc_le(0x100, data + 0x100);
5848        if (csum != CRC32_RESIDUAL) {
5849                rc = -ENODEV;
5850        }
5851
5852test_nvram_done:
5853        return rc;
5854}
5855
5856static int
5857bnx2_test_link(struct bnx2 *bp)
5858{
5859        u32 bmsr;
5860
5861        if (!netif_running(bp->dev))
5862                return -ENODEV;
5863
5864        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5865                if (bp->link_up)
5866                        return 0;
5867                return -ENODEV;
5868        }
5869        spin_lock_bh(&bp->phy_lock);
5870        bnx2_enable_bmsr1(bp);
5871        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5872        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5873        bnx2_disable_bmsr1(bp);
5874        spin_unlock_bh(&bp->phy_lock);
5875
5876        if (bmsr & BMSR_LSTATUS) {
5877                return 0;
5878        }
5879        return -ENODEV;
5880}
5881
5882static int
5883bnx2_test_intr(struct bnx2 *bp)
5884{
5885        int i;
5886        u16 status_idx;
5887
5888        if (!netif_running(bp->dev))
5889                return -ENODEV;
5890
5891        status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5892
5893        /* This register is not touched during run-time. */
5894        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5895        REG_RD(bp, BNX2_HC_COMMAND);
5896
5897        for (i = 0; i < 10; i++) {
5898                if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5899                        status_idx) {
5900
5901                        break;
5902                }
5903
5904                msleep_interruptible(10);
5905        }
5906        if (i < 10)
5907                return 0;
5908
5909        return -ENODEV;
5910}
5911
/* Determining link for parallel detection.
 * Returns 1 when the 5706 SerDes PHY sees a usable signal from a
 * partner that is not autonegotiating, 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled on this board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read it back. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No signal detected on the wire -> no link. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read the AN debug register twice; presumably the first read
	 * returns latched state (TODO: confirm against PHY docs).
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Check the DSP expansion register for incoming CONFIG codes. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5943
/* Periodic timer work for the 5706 SerDes PHY: performs parallel
 * detection (forcing 1G full duplex when the partner does not
 * autonegotiate), reverts to autoneg when the partner starts
 * advertising it, and re-checks physical sync state.
 * Runs in timer context; takes phy_lock with plain spin_lock().
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg recently (re)started; give it more time. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Partner may not autoneg; if the PHY sees a
			 * usable signal, force 1000 Mb/s full duplex
			 * (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link was forced by parallel detection; if the partner
		 * now signals autoneg ability (bit 0x20 of shadow reg
		 * 0x15 — TODO confirm against PHY docs), re-enable it.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Read AN debug twice; the second read presumably
		 * returns current (unlatched) sync state.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Lost sync while link is reported up: force the
			 * link down once, then let bnx2_set_link()
			 * re-evaluate on the next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6005
/* Periodic timer work for the 5708 SerDes PHY: while the link is down
 * with autoneg enabled, alternate between forced 2.5G and autoneg
 * modes until one of them brings the link up.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Remote PHY: link management is handled by firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Autoneg recently restarted; wait out the grace ticks. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not link; try forced 2.5G with a
			 * shorter re-check interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either; go back to
			 * autoneg and skip the next two timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6038
/* Main driver timer, rearmed every bp->current_interval jiffies while
 * the device is running: checks for missed MSIs, sends the management
 * heart beat, refreshes the firmware rx-drop counter, works around
 * broken statistics hardware, and runs the SerDes state machines.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are blocked (e.g. during reset); skip this tick
	 * but keep the timer alive.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Only plain (non one-shot) MSI needs the missed-MSI check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* The firmware rx-drop count lives in indirect register space;
	 * mirror it into the stats block for bnx2_get_stats64().
	 */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6074
6075static int
6076bnx2_request_irq(struct bnx2 *bp)
6077{
6078        unsigned long flags;
6079        struct bnx2_irq *irq;
6080        int rc = 0, i;
6081
6082        if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6083                flags = 0;
6084        else
6085                flags = IRQF_SHARED;
6086
6087        for (i = 0; i < bp->irq_nvecs; i++) {
6088                irq = &bp->irq_tbl[i];
6089                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6090                                 &bp->bnx2_napi[i]);
6091                if (rc)
6092                        break;
6093                irq->requested = 1;
6094        }
6095        return rc;
6096}
6097
6098static void
6099__bnx2_free_irq(struct bnx2 *bp)
6100{
6101        struct bnx2_irq *irq;
6102        int i;
6103
6104        for (i = 0; i < bp->irq_nvecs; i++) {
6105                irq = &bp->irq_tbl[i];
6106                if (irq->requested)
6107                        free_irq(irq->vector, &bp->bnx2_napi[i]);
6108                irq->requested = 0;
6109        }
6110}
6111
6112static void
6113bnx2_free_irq(struct bnx2 *bp)
6114{
6115
6116        __bnx2_free_irq(bp);
6117        if (bp->flags & BNX2_FLAG_USING_MSI)
6118                pci_disable_msi(bp->pdev);
6119        else if (bp->flags & BNX2_FLAG_USING_MSIX)
6120                pci_disable_msix(bp->pdev);
6121
6122        bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6123}
6124
6125static void
6126bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6127{
6128        int i, total_vecs, rc;
6129        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6130        struct net_device *dev = bp->dev;
6131        const int len = sizeof(bp->irq_tbl[0].name);
6132
6133        bnx2_setup_msix_tbl(bp);
6134        REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6135        REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6136        REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6137
6138        /*  Need to flush the previous three writes to ensure MSI-X
6139         *  is setup properly */
6140        REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6141
6142        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6143                msix_ent[i].entry = i;
6144                msix_ent[i].vector = 0;
6145        }
6146
6147        total_vecs = msix_vecs;
6148#ifdef BCM_CNIC
6149        total_vecs++;
6150#endif
6151        rc = -ENOSPC;
6152        while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6153                rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6154                if (rc <= 0)
6155                        break;
6156                if (rc > 0)
6157                        total_vecs = rc;
6158        }
6159
6160        if (rc != 0)
6161                return;
6162
6163        msix_vecs = total_vecs;
6164#ifdef BCM_CNIC
6165        msix_vecs--;
6166#endif
6167        bp->irq_nvecs = msix_vecs;
6168        bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6169        for (i = 0; i < total_vecs; i++) {
6170                bp->irq_tbl[i].vector = msix_ent[i].vector;
6171                snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6172                bp->irq_tbl[i].handler = bnx2_msi_1shot;
6173        }
6174}
6175
/* Select the interrupt mode (MSI-X, then MSI, then legacy INTx) and
 * size the tx/rx ring counts to the number of vectors obtained.
 * @dis_msi forces INTx.  Returns the result of
 * netif_set_real_num_rx_queues().
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped by the rx ring count. */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Default to a single INTx vector ... */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	/* ... then try MSI-X, then single-vector MSI. */
	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 uses the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Use a power-of-two number of tx rings. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	bp->num_rx_rings = bp->irq_nvecs;
	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6210
/* Called with rtnl_lock.  Brings the device up: power state, interrupt
 * mode, NAPI, memory, IRQs, chip init and the driver timer; when MSI
 * is in use, runs an interrupt self-test and falls back to INTx if it
 * fails.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Force INTx, then reinitialize the chip for
			 * the new vector layout.
			 */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind; the free routines tolerate partially-done setup. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	return rc;
}
6289
/* Workqueue handler for bp->reset_task (scheduled from
 * bnx2_tx_timeout()): stop traffic, fully reinitialize the chip and
 * restart the interface under rtnl_lock.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	rtnl_lock();
	/* Device was closed while the work was pending. */
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	bnx2_init_nic(bp, 1);

	/* Keep intr_sem raised so stray interrupts are ignored until
	 * bnx2_netif_start() re-enables interrupt handling —
	 * NOTE(review): presumably it clears intr_sem; confirm.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6309
/* Dump PCI config, power-management, EMAC, MCP and interrupt state to
 * the kernel log; used for tx-timeout diagnostics.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1, val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	/* The MCP state registers live at different offsets on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6342
/* netdev tx-timeout hook: log diagnostic state, then defer the actual
 * reset to process context via reset_task (see bnx2_reset_task()).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6353
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hard transmit path: maps the skb (head + page fragments) for DMA,
 * builds one tx BD per mapped piece with checksum/VLAN/TSO flags,
 * and rings the doorbell.  On a DMA mapping failure, everything
 * mapped so far is unwound and the packet is dropped.
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* The queue should have been stopped before it got this full. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* VLAN tag goes in the upper 16 bits of the flags word. */
	if (vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}

	/* TSO: encode MSS and TCP/IP header-length info for the chip. */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6: the TCP header offset beyond the fixed
			 * IPv6 header is split across several BD flag
			 * and mss bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4: extra header length, in 32-bit words
			 * beyond the minimum, goes into bits 8+.
			 */
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear part of the skb first. */
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
				       len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6526
/* Called with rtnl_lock.  Tears the device down in the reverse order
 * of bnx2_open() and leaves the chip in a low-power state.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure a pending reset_task cannot run concurrently with
	 * the teardown below.
	 */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6548
6549static void
6550bnx2_save_stats(struct bnx2 *bp)
6551{
6552        u32 *hw_stats = (u32 *) bp->stats_blk;
6553        u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6554        int i;
6555
6556        /* The 1st 10 counters are 64-bit counters */
6557        for (i = 0; i < 20; i += 2) {
6558                u32 hi;
6559                u64 lo;
6560
6561                hi = temp_stats[i] + hw_stats[i];
6562                lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6563                if (lo > 0xffffffff)
6564                        hi++;
6565                temp_stats[i] = hi;
6566                temp_stats[i + 1] = lo & 0xffffffff;
6567        }
6568
6569        for ( ; i < sizeof(struct statistics_block) / 4; i++)
6570                temp_stats[i] += hw_stats[i];
6571}
6572
/* Combine the hi/lo halves of a 64-bit hardware counter. */
#define GET_64BIT_NET_STATS64(ctr)              \
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Sum a 64-bit counter from the live stats block and the saved
 * totals in temp_stats_blk (see bnx2_save_stats()); expects a local
 * `bp` in scope.
 */
#define GET_64BIT_NET_STATS(ctr)                                \
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* Same for a 32-bit counter, cast to unsigned long. */
#define GET_32BIT_NET_STATS(ctr)                                \
	(unsigned long) (bp->stats_blk->ctr +                   \
			 bp->temp_stats_blk->ctr)
6583
/* ndo_get_stats64: fill @net_stats from the live hardware statistics
 * block combined with the totals saved in temp_stats_blk (see
 * bnx2_save_stats()).  Returns @net_stats.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Stats memory not allocated yet (device never opened). */
	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are reported as 0 on 5706 and 5708 A0 —
	 * NOTE(review): presumably a hardware limitation; confirm.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* stat_FwRxDrop is mirrored from firmware by bnx2_timer(). */
	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6656
6657/* All ethtool functions called with rtnl_lock */
6658
/* ethtool get_settings: report supported modes, advertised modes and
 * the current autoneg/speed/duplex state.  Called with rtnl_lock.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remote-PHY device supports both serdes and copper ports. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock protects the link-state fields read below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* No link: speed and duplex are unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6717
/* ethtool set_settings: validate and store the requested port,
 * autoneg, speed and duplex; apply them immediately when the device
 * is running.  Called with rtnl_lock.  Returns 0 or -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing changes on a validation error. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Clamp the advertised modes to what the port supports;
		 * an empty mask means advertise everything.
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: fibre allows only 1G/2.5G full duplex
		 * (2.5G only on capable PHYs); copper disallows 1G/2.5G.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6795
6796static void
6797bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6798{
6799        struct bnx2 *bp = netdev_priv(dev);
6800
6801        strcpy(info->driver, DRV_MODULE_NAME);
6802        strcpy(info->version, DRV_MODULE_VERSION);
6803        strcpy(info->bus_info, pci_name(bp->pdev));
6804        strcpy(info->fw_version, bp->fw_version);
6805}
6806
/* Size in bytes of the register dump returned by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool .get_regs_len handler: buffer size needed for a register dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6814
/* ethtool .get_regs handler: dump readable chip registers into *_p.
 * reg_boundaries[] lists [start, end) byte-offset pairs of readable
 * register ranges; offsets in the gaps between ranges are left
 * zero-filled so the dump stays directly offset-addressable.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p, i, offset;
        u8 *orig_p = _p;
        struct bnx2 *bp = netdev_priv(dev);
        static const u32 reg_boundaries[] = {
                0x0000, 0x0098, 0x0400, 0x045c,
                0x0800, 0x0880, 0x0c00, 0x0c10,
                0x0c30, 0x0d08, 0x1000, 0x101c,
                0x1040, 0x1048, 0x1080, 0x10a4,
                0x1400, 0x1490, 0x1498, 0x14f0,
                0x1500, 0x155c, 0x1580, 0x15dc,
                0x1600, 0x1658, 0x1680, 0x16d8,
                0x1800, 0x1820, 0x1840, 0x1854,
                0x1880, 0x1894, 0x1900, 0x1984,
                0x1c00, 0x1c0c, 0x1c40, 0x1c54,
                0x1c80, 0x1c94, 0x1d00, 0x1d84,
                0x2000, 0x2030, 0x23c0, 0x2400,
                0x2800, 0x2820, 0x2830, 0x2850,
                0x2b40, 0x2c10, 0x2fc0, 0x3058,
                0x3c00, 0x3c94, 0x4000, 0x4010,
                0x4080, 0x4090, 0x43c0, 0x4458,
                0x4c00, 0x4c18, 0x4c40, 0x4c54,
                0x4fc0, 0x5010, 0x53c0, 0x5444,
                0x5c00, 0x5c18, 0x5c80, 0x5c90,
                0x5fc0, 0x6000, 0x6400, 0x6428,
                0x6800, 0x6848, 0x684c, 0x6860,
                0x6888, 0x6910, 0x8000
        };

        regs->version = 0;

        memset(p, 0, BNX2_REGDUMP_LEN);

        /* Registers can only be read while the chip is up. */
        if (!netif_running(bp->dev))
                return;

        i = 0;
        offset = reg_boundaries[0];
        p += offset;
        while (offset < BNX2_REGDUMP_LEN) {
                *p++ = REG_RD(bp, offset);
                offset += 4;
                /* End of this readable range: jump to the next one and
                 * re-derive the output pointer from the byte offset.
                 */
                if (offset == reg_boundaries[i + 1]) {
                        offset = reg_boundaries[i + 2];
                        p = (u32 *) (orig_p + offset);
                        i += 2;
                }
        }
}
6866
6867static void
6868bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6869{
6870        struct bnx2 *bp = netdev_priv(dev);
6871
6872        if (bp->flags & BNX2_FLAG_NO_WOL) {
6873                wol->supported = 0;
6874                wol->wolopts = 0;
6875        }
6876        else {
6877                wol->supported = WAKE_MAGIC;
6878                if (bp->wol)
6879                        wol->wolopts = WAKE_MAGIC;
6880                else
6881                        wol->wolopts = 0;
6882        }
6883        memset(&wol->sopass, 0, sizeof(wol->sopass));
6884}
6885
6886static int
6887bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6888{
6889        struct bnx2 *bp = netdev_priv(dev);
6890
6891        if (wol->wolopts & ~WAKE_MAGIC)
6892                return -EINVAL;
6893
6894        if (wol->wolopts & WAKE_MAGIC) {
6895                if (bp->flags & BNX2_FLAG_NO_WOL)
6896                        return -EINVAL;
6897
6898                bp->wol = 1;
6899        }
6900        else {
6901                bp->wol = 0;
6902        }
6903        return 0;
6904}
6905
/* ethtool .nway_reset handler: restart autonegotiation.  Only valid
 * while the device is running and autoneg is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!netif_running(dev))
                return -EAGAIN;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Remote PHY: the management firmware restarts autoneg for us. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping so the link-down can be
                 * observed by the peer.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes autoneg timeout serviced by bp->timer. */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback (set above for serdes) and restart autoneg. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6951
/* ethtool .get_link handler: report the driver's cached link state. */
static u32
bnx2_get_link(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        return bp->link_up;
}
6959
6960static int
6961bnx2_get_eeprom_len(struct net_device *dev)
6962{
6963        struct bnx2 *bp = netdev_priv(dev);
6964
6965        if (bp->flash_info == NULL)
6966                return 0;
6967
6968        return (int) bp->flash_size;
6969}
6970
6971static int
6972bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6973                u8 *eebuf)
6974{
6975        struct bnx2 *bp = netdev_priv(dev);
6976        int rc;
6977
6978        if (!netif_running(dev))
6979                return -EAGAIN;
6980
6981        /* parameters already validated in ethtool_get_eeprom */
6982
6983        rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6984
6985        return rc;
6986}
6987
6988static int
6989bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6990                u8 *eebuf)
6991{
6992        struct bnx2 *bp = netdev_priv(dev);
6993        int rc;
6994
6995        if (!netif_running(dev))
6996                return -EAGAIN;
6997
6998        /* parameters already validated in ethtool_set_eeprom */
6999
7000        rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7001
7002        return rc;
7003}
7004
/* ethtool .get_coalesce handler: report the current interrupt coalescing
 * parameters from the driver's cached copies.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(coal, 0, sizeof(struct ethtool_coalesce));

        coal->rx_coalesce_usecs = bp->rx_ticks;
        coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
        coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
        coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

        coal->tx_coalesce_usecs = bp->tx_ticks;
        coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
        coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
        coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

        coal->stats_block_coalesce_usecs = bp->stats_ticks;

        return 0;
}
7026
7027static int
7028bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7029{
7030        struct bnx2 *bp = netdev_priv(dev);
7031
7032        bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7033        if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7034
7035        bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7036        if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7037
7038        bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7039        if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7040
7041        bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7042        if (bp->rx_quick_cons_trip_int > 0xff)
7043                bp->rx_quick_cons_trip_int = 0xff;
7044
7045        bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7046        if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7047
7048        bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7049        if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7050
7051        bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7052        if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7053
7054        bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7055        if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7056                0xff;
7057
7058        bp->stats_ticks = coal->stats_block_coalesce_usecs;
7059        if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7060                if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7061                        bp->stats_ticks = USEC_PER_SEC;
7062        }
7063        if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7064                bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7065        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7066
7067        if (netif_running(bp->dev)) {
7068                bnx2_netif_stop(bp, true);
7069                bnx2_init_nic(bp, 0);
7070                bnx2_netif_start(bp, true);
7071        }
7072
7073        return 0;
7074}
7075
7076static void
7077bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7078{
7079        struct bnx2 *bp = netdev_priv(dev);
7080
7081        ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7082        ering->rx_mini_max_pending = 0;
7083        ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7084
7085        ering->rx_pending = bp->rx_ring_size;
7086        ering->rx_mini_pending = 0;
7087        ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7088
7089        ering->tx_max_pending = MAX_TX_DESC_CNT;
7090        ering->tx_pending = bp->tx_ring_size;
7091}
7092
/* Resize the RX/TX rings.  If the device is running it is fully torn
 * down (chip reset, IRQs released, buffers and DMA memory freed), the
 * new sizes are recorded, and everything is reallocated and restarted.
 * On a restart failure the device is closed.  Returns 0 or -errno.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
        if (netif_running(bp->dev)) {
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);

                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                __bnx2_free_irq(bp);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                if (!rc)
                        rc = bnx2_request_irq(bp);

                if (!rc)
                        rc = bnx2_init_nic(bp, 0);

                if (rc) {
                        /* NOTE(review): NAPI is re-enabled before
                         * dev_close() - presumably so close can quiesce
                         * it cleanly; confirm against bnx2_close().
                         */
                        bnx2_napi_enable(bp);
                        dev_close(bp->dev);
                        return rc;
                }
#ifdef BCM_CNIC
                mutex_lock(&bp->cnic_lock);
                /* Let cnic know about the new status block. */
                if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
#endif
                bnx2_netif_start(bp, true);
        }
        return 0;
}
7136
7137static int
7138bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7139{
7140        struct bnx2 *bp = netdev_priv(dev);
7141        int rc;
7142
7143        if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7144                (ering->tx_pending > MAX_TX_DESC_CNT) ||
7145                (ering->tx_pending <= MAX_SKB_FRAGS)) {
7146
7147                return -EINVAL;
7148        }
7149        rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7150        return rc;
7151}
7152
7153static void
7154bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7155{
7156        struct bnx2 *bp = netdev_priv(dev);
7157
7158        epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7159        epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7160        epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7161}
7162
7163static int
7164bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7165{
7166        struct bnx2 *bp = netdev_priv(dev);
7167
7168        bp->req_flow_ctrl = 0;
7169        if (epause->rx_pause)
7170                bp->req_flow_ctrl |= FLOW_CTRL_RX;
7171        if (epause->tx_pause)
7172                bp->req_flow_ctrl |= FLOW_CTRL_TX;
7173
7174        if (epause->autoneg) {
7175                bp->autoneg |= AUTONEG_FLOW_CTRL;
7176        }
7177        else {
7178                bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7179        }
7180
7181        if (netif_running(dev)) {
7182                spin_lock_bh(&bp->phy_lock);
7183                bnx2_setup_phy(bp, bp->phy_port);
7184                spin_unlock_bh(&bp->phy_lock);
7185        }
7186
7187        return 0;
7188}
7189
/* ethtool .get_rx_csum handler: report the cached RX checksum setting. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        return bp->rx_csum;
}
7197
/* ethtool .set_rx_csum handler: record the new RX checksum setting in
 * the driver state; always succeeds.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_csum = data;
        return 0;
}
7206
7207static int
7208bnx2_set_tso(struct net_device *dev, u32 data)
7209{
7210        struct bnx2 *bp = netdev_priv(dev);
7211
7212        if (data) {
7213                dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7214                if (CHIP_NUM(bp) == CHIP_NUM_5709)
7215                        dev->features |= NETIF_F_TSO6;
7216        } else
7217                dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7218                                   NETIF_F_TSO_ECN);
7219        return 0;
7220}
7221
/* ethtool statistics names, returned by bnx2_get_strings(); kept in the
 * same order as bnx2_stats_offset_arr[] and the *_stats_len_arr[] tables.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_ftq_discards" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
7273
/* Number of exported ethtool statistics; derived from the name table so
 * the count can never drift from the strings actually exported.
 * (ARRAY_SIZE is the kernel's standard sizeof(a)/sizeof(a[0]) macro.)
 */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset of a statistics_block member, expressed as a u32 word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7278
/* u32 word offsets into struct statistics_block, one entry per name in
 * bnx2_stats_str_arr[] (same order).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7328
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter byte width (8 or 4) for 5706 A0-A2 and 5708 A0 parts;
 * 0 means the counter is skipped and reported as zero.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7339
/* Per-counter byte width for all other chip revisions; only
 * stat_IfHCInBadOctets (index 1) is skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7347
#define BNX2_NUM_TESTS 6

/* Names for the ethtool self-test result slots, in the order the tests
 * are run by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
7360
7361static int
7362bnx2_get_sset_count(struct net_device *dev, int sset)
7363{
7364        switch (sset) {
7365        case ETH_SS_TEST:
7366                return BNX2_NUM_TESTS;
7367        case ETH_SS_STATS:
7368                return BNX2_NUM_STATS;
7369        default:
7370                return -EOPNOTSUPP;
7371        }
7372}
7373
/* ethtool .self_test handler.  buf[] gets one result slot per entry in
 * bnx2_tests_str_arr[]; non-zero means that test failed.  Offline tests
 * reset the chip into diagnostic mode and temporarily stop traffic.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Tests need the chip powered up even if the interface is down. */
        bnx2_set_power_state(bp, PCI_D0);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Restore normal operation (or shut down if the
                 * interface is closed).
                 */
                if (!netif_running(bp->dev))
                        bnx2_shutdown_chip(bp);
                else {
                        bnx2_init_nic(bp, 1);
                        bnx2_netif_start(bp, true);
                }

                /* wait for link up */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        /* Online tests run regardless of the offline flag. */
        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
        /* Return to low-power state if the interface is down. */
        if (!netif_running(bp->dev))
                bnx2_set_power_state(bp, PCI_D3hot);
}
7432
7433static void
7434bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7435{
7436        switch (stringset) {
7437        case ETH_SS_STATS:
7438                memcpy(buf, bnx2_stats_str_arr,
7439                        sizeof(bnx2_stats_str_arr));
7440                break;
7441        case ETH_SS_TEST:
7442                memcpy(buf, bnx2_tests_str_arr,
7443                        sizeof(bnx2_tests_str_arr));
7444                break;
7445        }
7446}
7447
/* ethtool .get_ethtool_stats handler: fill buf[] with BNX2_NUM_STATS
 * counters.  Each value is the live hardware stats block plus
 * temp_stats_blk (counters preserved across chip resets - see
 * bnx2_save_stats() usage in bnx2_change_ring_size()).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 *hw_stats = (u32 *) bp->stats_blk;
        u32 *temp_stats = (u32 *) bp->temp_stats_blk;
        u8 *stats_len_arr = NULL;

        /* No stats block allocated yet: report all zeroes. */
        if (hw_stats == NULL) {
                memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
                return;
        }

        /* Early revisions skip extra counters because of errata. */
        if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                stats_len_arr = bnx2_5706_stats_len_arr;
        else
                stats_len_arr = bnx2_5708_stats_len_arr;

        for (i = 0; i < BNX2_NUM_STATS; i++) {
                unsigned long offset;

                if (stats_len_arr[i] == 0) {
                        /* skip this counter */
                        buf[i] = 0;
                        continue;
                }

                offset = bnx2_stats_offset_arr[i];
                if (stats_len_arr[i] == 4) {
                        /* 4-byte counter */
                        buf[i] = (u64) *(hw_stats + offset) +
                                 *(temp_stats + offset);
                        continue;
                }
                /* 8-byte counter: high u32 at offset, low u32 at offset+1 */
                buf[i] = (((u64) *(hw_stats + offset)) << 32) +
                         *(hw_stats + offset + 1) +
                         (((u64) *(temp_stats + offset)) << 32) +
                         *(temp_stats + offset + 1);
        }
}
7494
/* ethtool .phys_id handler: identify the adapter by blinking the port
 * LED for 'data' seconds (default 2), then restore the saved LED
 * configuration and, if the interface is down, the low-power state.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 save;

        bnx2_set_power_state(bp, PCI_D0);

        if (data == 0)
                data = 2;

        /* Save the LED config and put the LEDs under MAC control. */
        save = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

        /* Toggle the LED override state every 500 ms. */
        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0) {
                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
                }
                else {
                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
                                BNX2_EMAC_LED_1000MB_OVERRIDE |
                                BNX2_EMAC_LED_100MB_OVERRIDE |
                                BNX2_EMAC_LED_10MB_OVERRIDE |
                                BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
                                BNX2_EMAC_LED_TRAFFIC);
                }
                msleep_interruptible(500);
                /* Stop early if a signal interrupted the sleep. */
                if (signal_pending(current))
                        break;
        }
        REG_WR(bp, BNX2_EMAC_LED, 0);
        REG_WR(bp, BNX2_MISC_CFG, save);

        if (!netif_running(dev))
                bnx2_set_power_state(bp, PCI_D3hot);

        return 0;
}
7534
7535static int
7536bnx2_set_tx_csum(struct net_device *dev, u32 data)
7537{
7538        struct bnx2 *bp = netdev_priv(dev);
7539
7540        if (CHIP_NUM(bp) == CHIP_NUM_5709)
7541                return ethtool_op_set_tx_ipv6_csum(dev, data);
7542        else
7543                return ethtool_op_set_tx_csum(dev, data);
7544}
7545
/* ethtool .set_flags handler.  Only RX hashing and VLAN acceleration
 * flags may change: RX VLAN stripping can only be turned off when the
 * chip/firmware can keep VLAN tags, and TX VLAN acceleration must stay
 * on because TSO with a software VLAN tag does not work with current
 * firmware.  A change of RX VLAN stripping is pushed to the chip with
 * the interface quiesced.
 */
static int
bnx2_set_flags(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) &&
            !(data & ETH_FLAG_RXVLAN))
                return -EINVAL;

        /* TSO with VLAN tag won't work with current firmware */
        if (!(data & ETH_FLAG_TXVLAN))
                return -EINVAL;

        rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
                                  ETH_FLAG_TXVLAN);
        if (rc)
                return rc;

        /* Reprogram the RX mode only if the KEEP_VLAN_TAG state changed. */
        if ((!!(data & ETH_FLAG_RXVLAN) !=
            !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
            netif_running(dev)) {
                bnx2_netif_stop(bp, false);
                bnx2_set_rx_mode(dev);
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
                bnx2_netif_start(bp, false);
        }

        return 0;
}
7576
/* ethtool operations supported by this driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = bnx2_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .set_tx_csum            = bnx2_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = bnx2_set_tso,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
        .set_flags              = bnx2_set_flags,
        .get_flags              = ethtool_op_get_flags,
};
7609
/* Called with rtnl_lock */
/* ndo_do_ioctl handler for the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG,
 * SIOCSMIIREG).  Direct PHY register access is refused when the PHY is
 * managed by remote firmware or the device is down.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                /* PHY register access is serialized by phy_lock. */
                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
7660
7661/* Called with rtnl_lock */
7662static int
7663bnx2_change_mac_addr(struct net_device *dev, void *p)
7664{
7665        struct sockaddr *addr = p;
7666        struct bnx2 *bp = netdev_priv(dev);
7667
7668        if (!is_valid_ether_addr(addr->sa_data))
7669                return -EINVAL;
7670
7671        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7672        if (netif_running(dev))
7673                bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7674
7675        return 0;
7676}
7677
7678/* Called with rtnl_lock */
7679static int
7680bnx2_change_mtu(struct net_device *dev, int new_mtu)
7681{
7682        struct bnx2 *bp = netdev_priv(dev);
7683
7684        if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7685                ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7686                return -EINVAL;
7687
7688        dev->mtu = new_mtu;
7689        return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7690}
7691
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run every vector's interrupt handler with its IRQ line
 * masked, emulating interrupt delivery for netconsole and friends.
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_irq *irq;
        int vec;

        for (vec = 0; vec < bp->irq_nvecs; vec++) {
                irq = &bp->irq_tbl[vec];
                disable_irq(irq->vector);
                irq->handler(irq->vector, &bp->bnx2_napi[vec]);
                enable_irq(irq->vector);
        }
}
#endif
7708
/* Determine whether this 5709 port uses copper or SerDes media and set
 * BNX2_PHY_FLAG_SERDES accordingly.
 *
 * The media is decoded from BNX2_MISC_DUAL_MEDIA_CTRL: fixed-media parts
 * are identified by their bond id, while dual-media parts are decoded
 * from the strap pins (or the software strap override when enabled).
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
        u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
        u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
        u32 strap;

        /* Bond id "C" is copper-only (flag stays clear), "S" is
         * SerDes-only.  Anything else is a dual-media part.
         */
        if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
                return;
        else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
                bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
                return;
        }

        /* Prefer the software strap override bits when the override is
         * enabled; otherwise read the hardware strap.
         */
        if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
        else
                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

        /* The strap-value -> SerDes mapping differs between PCI function
         * 0 and function 1 (values presumably from Broadcom hardware
         * documentation -- TODO confirm against the data sheet).
         */
        if (PCI_FUNC(bp->pdev->devfn) == 0) {
                switch (strap) {
                case 0x4:
                case 0x5:
                case 0x6:
                        bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
                        return;
                }
        } else {
                switch (strap) {
                case 0x1:
                case 0x2:
                case 0x4:
                        bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
                        return;
                }
        }
}
7746
/* Detect the bus type (PCI vs. PCI-X), clock speed and width of the
 * conventional-PCI bus the chip sits on, recording the results in
 * bp->flags and bp->bus_speed_mhz.  Only called for non-PCIE devices
 * (see bnx2_init_board).
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
        u32 reg;

        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                /* PCI-X: the bus speed must be decoded from the detected
                 * clock field of the clock control register.
                 */
                bp->flags |= BNX2_FLAG_PCIX;

                clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                /* Anything slower (or undetectable) is treated as 33MHz. */
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                /* Conventional PCI: only 33 vs. 66 MHz, per the M66EN pin. */
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7798
/* Read the board's VPD image from NVRAM and, when it carries a vendor
 * version keyword, copy that version string into bp->fw_version.
 *
 * The function is best-effort: any read or parse failure simply leaves
 * bp->fw_version untouched.  The version is only used when the MFR_ID
 * keyword equals "1028" (NOTE(review): this looks like Dell's PCI
 * vendor ID rendered in ASCII -- confirm) and the VENDOR0 string fits
 * in BNX2_MAX_VER_SLEN bytes.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
        int rc, i, j;
        u8 *data;
        unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET   0x300
#define BNX2_VPD_LEN            128
#define BNX2_MAX_VER_SLEN       30

        /* One buffer, two halves: the raw NVRAM image goes in the upper
         * BNX2_VPD_LEN bytes, the byte-order-corrected copy in the lower.
         */
        data = kmalloc(256, GFP_KERNEL);
        if (!data)
                return;

        rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
                             BNX2_VPD_LEN);
        if (rc)
                goto vpd_done;

        /* NVRAM stores each 32-bit word byte-swapped; restore byte order
         * into the first half of the buffer.
         */
        for (i = 0; i < BNX2_VPD_LEN; i += 4) {
                data[i] = data[i + BNX2_VPD_LEN + 3];
                data[i + 1] = data[i + BNX2_VPD_LEN + 2];
                data[i + 2] = data[i + BNX2_VPD_LEN + 1];
                data[i + 3] = data[i + BNX2_VPD_LEN];
        }

        /* Locate the VPD read-only large-resource block. */
        i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto vpd_done;

        rosize = pci_vpd_lrdt_size(&data[i]);
        i += PCI_VPD_LRDT_TAG_SIZE;
        block_end = i + rosize;

        if (block_end > BNX2_VPD_LEN)
                goto vpd_done;

        j = pci_vpd_find_info_keyword(data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_MFR_ID);
        if (j < 0)
                goto vpd_done;

        len = pci_vpd_info_field_size(&data[j]);

        j += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (j + len > block_end || len != 4 ||
            memcmp(&data[j], "1028", 4))
                goto vpd_done;

        j = pci_vpd_find_info_keyword(data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_VENDOR0);
        if (j < 0)
                goto vpd_done;

        len = pci_vpd_info_field_size(&data[j]);

        j += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
                goto vpd_done;

        /* Terminate with a space; bnx2_init_board() appends the bootcode
         * version after this string.
         */
        memcpy(bp->fw_version, &data[j], len);
        bp->fw_version[len] = ' ';

vpd_done:
        kfree(data);
}
7866
7867static int __devinit
7868bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7869{
7870        struct bnx2 *bp;
7871        unsigned long mem_len;
7872        int rc, i, j;
7873        u32 reg;
7874        u64 dma_mask, persist_dma_mask;
7875        int err;
7876
7877        SET_NETDEV_DEV(dev, &pdev->dev);
7878        bp = netdev_priv(dev);
7879
7880        bp->flags = 0;
7881        bp->phy_flags = 0;
7882
7883        bp->temp_stats_blk =
7884                kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7885
7886        if (bp->temp_stats_blk == NULL) {
7887                rc = -ENOMEM;
7888                goto err_out;
7889        }
7890
7891        /* enable device (incl. PCI PM wakeup), and bus-mastering */
7892        rc = pci_enable_device(pdev);
7893        if (rc) {
7894                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7895                goto err_out;
7896        }
7897
7898        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7899                dev_err(&pdev->dev,
7900                        "Cannot find PCI device base address, aborting\n");
7901                rc = -ENODEV;
7902                goto err_out_disable;
7903        }
7904
7905        rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7906        if (rc) {
7907                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7908                goto err_out_disable;
7909        }
7910
7911        pci_set_master(pdev);
7912
7913        bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7914        if (bp->pm_cap == 0) {
7915                dev_err(&pdev->dev,
7916                        "Cannot find power management capability, aborting\n");
7917                rc = -EIO;
7918                goto err_out_release;
7919        }
7920
7921        bp->dev = dev;
7922        bp->pdev = pdev;
7923
7924        spin_lock_init(&bp->phy_lock);
7925        spin_lock_init(&bp->indirect_lock);
7926#ifdef BCM_CNIC
7927        mutex_init(&bp->cnic_lock);
7928#endif
7929        INIT_WORK(&bp->reset_task, bnx2_reset_task);
7930
7931        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7932        mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7933        dev->mem_end = dev->mem_start + mem_len;
7934        dev->irq = pdev->irq;
7935
7936        bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7937
7938        if (!bp->regview) {
7939                dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7940                rc = -ENOMEM;
7941                goto err_out_release;
7942        }
7943
7944        bnx2_set_power_state(bp, PCI_D0);
7945
7946        /* Configure byte swap and enable write to the reg_window registers.
7947         * Rely on CPU to do target byte swapping on big endian systems
7948         * The chip's target access swapping will not swap all accesses
7949         */
7950        REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7951                   BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7952                   BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7953
7954        bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7955
7956        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7957                if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7958                        dev_err(&pdev->dev,
7959                                "Cannot find PCIE capability, aborting\n");
7960                        rc = -EIO;
7961                        goto err_out_unmap;
7962                }
7963                bp->flags |= BNX2_FLAG_PCIE;
7964                if (CHIP_REV(bp) == CHIP_REV_Ax)
7965                        bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7966
7967                /* AER (Advanced Error Reporting) hooks */
7968                err = pci_enable_pcie_error_reporting(pdev);
7969                if (!err)
7970                        bp->flags |= BNX2_FLAG_AER_ENABLED;
7971
7972        } else {
7973                bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7974                if (bp->pcix_cap == 0) {
7975                        dev_err(&pdev->dev,
7976                                "Cannot find PCIX capability, aborting\n");
7977                        rc = -EIO;
7978                        goto err_out_unmap;
7979                }
7980                bp->flags |= BNX2_FLAG_BROKEN_STATS;
7981        }
7982
7983        if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7984                if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7985                        bp->flags |= BNX2_FLAG_MSIX_CAP;
7986        }
7987
7988        if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7989                if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7990                        bp->flags |= BNX2_FLAG_MSI_CAP;
7991        }
7992
7993        /* 5708 cannot support DMA addresses > 40-bit.  */
7994        if (CHIP_NUM(bp) == CHIP_NUM_5708)
7995                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7996        else
7997                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7998
7999        /* Configure DMA attributes. */
8000        if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8001                dev->features |= NETIF_F_HIGHDMA;
8002                rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8003                if (rc) {
8004                        dev_err(&pdev->dev,
8005                                "pci_set_consistent_dma_mask failed, aborting\n");
8006                        goto err_out_unmap;
8007                }
8008        } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8009                dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8010                goto err_out_unmap;
8011        }
8012
8013        if (!(bp->flags & BNX2_FLAG_PCIE))
8014                bnx2_get_pci_speed(bp);
8015
8016        /* 5706A0 may falsely detect SERR and PERR. */
8017        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8018                reg = REG_RD(bp, PCI_COMMAND);
8019                reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8020                REG_WR(bp, PCI_COMMAND, reg);
8021        }
8022        else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8023                !(bp->flags & BNX2_FLAG_PCIX)) {
8024
8025                dev_err(&pdev->dev,
8026                        "5706 A1 can only be used in a PCIX bus, aborting\n");
8027                goto err_out_unmap;
8028        }
8029
8030        bnx2_init_nvram(bp);
8031
8032        reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8033
8034        if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8035            BNX2_SHM_HDR_SIGNATURE_SIG) {
8036                u32 off = PCI_FUNC(pdev->devfn) << 2;
8037
8038                bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8039        } else
8040                bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8041
8042        /* Get the permanent MAC address.  First we need to make sure the
8043         * firmware is actually running.
8044         */
8045        reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8046
8047        if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8048            BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8049                dev_err(&pdev->dev, "Firmware not running, aborting\n");
8050                rc = -ENODEV;
8051                goto err_out_unmap;
8052        }
8053
8054        bnx2_read_vpd_fw_ver(bp);
8055
8056        j = strlen(bp->fw_version);
8057        reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8058        for (i = 0; i < 3 && j < 24; i++) {
8059                u8 num, k, skip0;
8060
8061                if (i == 0) {
8062                        bp->fw_version[j++] = 'b';
8063                        bp->fw_version[j++] = 'c';
8064                        bp->fw_version[j++] = ' ';
8065                }
8066                num = (u8) (reg >> (24 - (i * 8)));
8067                for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8068                        if (num >= k || !skip0 || k == 1) {
8069                                bp->fw_version[j++] = (num / k) + '0';
8070                                skip0 = 0;
8071                        }
8072                }
8073                if (i != 2)
8074                        bp->fw_version[j++] = '.';
8075        }
8076        reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8077        if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8078                bp->wol = 1;
8079
8080        if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8081                bp->flags |= BNX2_FLAG_ASF_ENABLE;
8082
8083                for (i = 0; i < 30; i++) {
8084                        reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8085                        if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8086                                break;
8087                        msleep(10);
8088                }
8089        }
8090        reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8091        reg &= BNX2_CONDITION_MFW_RUN_MASK;
8092        if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8093            reg != BNX2_CONDITION_MFW_RUN_NONE) {
8094                u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8095
8096                if (j < 32)
8097                        bp->fw_version[j++] = ' ';
8098                for (i = 0; i < 3 && j < 28; i++) {
8099                        reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8100                        reg = swab32(reg);
8101                        memcpy(&bp->fw_version[j], &reg, 4);
8102                        j += 4;
8103                }
8104        }
8105
8106        reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8107        bp->mac_addr[0] = (u8) (reg >> 8);
8108        bp->mac_addr[1] = (u8) reg;
8109
8110        reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8111        bp->mac_addr[2] = (u8) (reg >> 24);
8112        bp->mac_addr[3] = (u8) (reg >> 16);
8113        bp->mac_addr[4] = (u8) (reg >> 8);
8114        bp->mac_addr[5] = (u8) reg;
8115
8116        bp->tx_ring_size = MAX_TX_DESC_CNT;
8117        bnx2_set_rx_ring_size(bp, 255);
8118
8119        bp->rx_csum = 1;
8120
8121        bp->tx_quick_cons_trip_int = 2;
8122        bp->tx_quick_cons_trip = 20;
8123        bp->tx_ticks_int = 18;
8124        bp->tx_ticks = 80;
8125
8126        bp->rx_quick_cons_trip_int = 2;
8127        bp->rx_quick_cons_trip = 12;
8128        bp->rx_ticks_int = 18;
8129        bp->rx_ticks = 18;
8130
8131        bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8132
8133        bp->current_interval = BNX2_TIMER_INTERVAL;
8134
8135        bp->phy_addr = 1;
8136
8137        /* Disable WOL support if we are running on a SERDES chip. */
8138        if (CHIP_NUM(bp) == CHIP_NUM_5709)
8139                bnx2_get_5709_media(bp);
8140        else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8141                bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8142
8143        bp->phy_port = PORT_TP;
8144        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8145                bp->phy_port = PORT_FIBRE;
8146                reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8147                if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8148                        bp->flags |= BNX2_FLAG_NO_WOL;
8149                        bp->wol = 0;
8150                }
8151                if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8152                        /* Don't do parallel detect on this board because of
8153                         * some board problems.  The link will not go down
8154                         * if we do parallel detect.
8155                         */
8156                        if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8157                            pdev->subsystem_device == 0x310c)
8158                                bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8159                } else {
8160                        bp->phy_addr = 2;
8161                        if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8162                                bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8163                }
8164        } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8165                   CHIP_NUM(bp) == CHIP_NUM_5708)
8166                bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8167        else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8168                 (CHIP_REV(bp) == CHIP_REV_Ax ||
8169                  CHIP_REV(bp) == CHIP_REV_Bx))
8170                bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8171
8172        bnx2_init_fw_cap(bp);
8173
8174        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8175            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8176            (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8177            !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8178                bp->flags |= BNX2_FLAG_NO_WOL;
8179                bp->wol = 0;
8180        }
8181
8182        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8183                bp->tx_quick_cons_trip_int =
8184                        bp->tx_quick_cons_trip;
8185                bp->tx_ticks_int = bp->tx_ticks;
8186                bp->rx_quick_cons_trip_int =
8187                        bp->rx_quick_cons_trip;
8188                bp->rx_ticks_int = bp->rx_ticks;
8189                bp->comp_prod_trip_int = bp->comp_prod_trip;
8190                bp->com_ticks_int = bp->com_ticks;
8191                bp->cmd_ticks_int = bp->cmd_ticks;
8192        }
8193
8194        /* Disable MSI on 5706 if AMD 8132 bridge is found.
8195         *
8196         * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8197         * with byte enables disabled on the unused 32-bit word.  This is legal
8198         * but causes problems on the AMD 8132 which will eventually stop
8199         * responding after a while.
8200         *
8201         * AMD believes this incompatibility is unique to the 5706, and
8202         * prefers to locally disable MSI rather than globally disabling it.
8203         */
8204        if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8205                struct pci_dev *amd_8132 = NULL;
8206
8207                while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8208                                                  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8209                                                  amd_8132))) {
8210
8211                        if (amd_8132->revision >= 0x10 &&
8212                            amd_8132->revision <= 0x13) {
8213                                disable_msi = 1;
8214                                pci_dev_put(amd_8132);
8215                                break;
8216                        }
8217                }
8218        }
8219
8220        bnx2_set_default_link(bp);
8221        bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8222
8223        init_timer(&bp->timer);
8224        bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8225        bp->timer.data = (unsigned long) bp;
8226        bp->timer.function = bnx2_timer;
8227
8228        pci_save_state(pdev);
8229
8230        return 0;
8231
8232err_out_unmap:
8233        if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8234                pci_disable_pcie_error_reporting(pdev);
8235                bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8236        }
8237
8238        if (bp->regview) {
8239                iounmap(bp->regview);
8240                bp->regview = NULL;
8241        }
8242
8243err_out_release:
8244        pci_release_regions(pdev);
8245
8246err_out_disable:
8247        pci_disable_device(pdev);
8248        pci_set_drvdata(pdev, NULL);
8249
8250err_out:
8251        return rc;
8252}
8253
8254static char * __devinit
8255bnx2_bus_string(struct bnx2 *bp, char *str)
8256{
8257        char *s = str;
8258
8259        if (bp->flags & BNX2_FLAG_PCIE) {
8260                s += sprintf(s, "PCI Express");
8261        } else {
8262                s += sprintf(s, "PCI");
8263                if (bp->flags & BNX2_FLAG_PCIX)
8264                        s += sprintf(s, "-X");
8265                if (bp->flags & BNX2_FLAG_PCI_32BIT)
8266                        s += sprintf(s, " 32-bit");
8267                else
8268                        s += sprintf(s, " 64-bit");
8269                s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8270        }
8271        return str;
8272}
8273
8274static void
8275bnx2_del_napi(struct bnx2 *bp)
8276{
8277        int i;
8278
8279        for (i = 0; i < bp->irq_nvecs; i++)
8280                netif_napi_del(&bp->bnx2_napi[i].napi);
8281}
8282
8283static void
8284bnx2_init_napi(struct bnx2 *bp)
8285{
8286        int i;
8287
8288        for (i = 0; i < bp->irq_nvecs; i++) {
8289                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8290                int (*poll)(struct napi_struct *, int);
8291
8292                if (i == 0)
8293                        poll = bnx2_poll;
8294                else
8295                        poll = bnx2_poll_msix;
8296
8297                netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8298                bnapi->bp = bp;
8299        }
8300}
8301
/* net_device callback table; installed on the netdev in bnx2_init_one(). */
static const struct net_device_ops bnx2_netdev_ops = {
        .ndo_open               = bnx2_open,
        .ndo_start_xmit         = bnx2_start_xmit,
        .ndo_stop               = bnx2_close,
        .ndo_get_stats64        = bnx2_get_stats64,
        .ndo_set_rx_mode        = bnx2_set_rx_mode,
        .ndo_do_ioctl           = bnx2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = bnx2_change_mac_addr,
        .ndo_change_mtu         = bnx2_change_mtu,
        .ndo_tx_timeout         = bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = poll_bnx2,
#endif
};
8317
8318static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
8319{
8320        dev->vlan_features |= flags;
8321}
8322
8323static int __devinit
8324bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8325{
8326        static int version_printed = 0;
8327        struct net_device *dev = NULL;
8328        struct bnx2 *bp;
8329        int rc;
8330        char str[40];
8331
8332        if (version_printed++ == 0)
8333                pr_info("%s", version);
8334
8335        /* dev zeroed in init_etherdev */
8336        dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8337
8338        if (!dev)
8339                return -ENOMEM;
8340
8341        rc = bnx2_init_board(pdev, dev);
8342        if (rc < 0) {
8343                free_netdev(dev);
8344                return rc;
8345        }
8346
8347        dev->netdev_ops = &bnx2_netdev_ops;
8348        dev->watchdog_timeo = TX_TIMEOUT;
8349        dev->ethtool_ops = &bnx2_ethtool_ops;
8350
8351        bp = netdev_priv(dev);
8352
8353        pci_set_drvdata(pdev, dev);
8354
8355        rc = bnx2_request_firmware(bp);
8356        if (rc)
8357                goto error;
8358
8359        memcpy(dev->dev_addr, bp->mac_addr, 6);
8360        memcpy(dev->perm_addr, bp->mac_addr, 6);
8361
8362        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
8363                         NETIF_F_RXHASH;
8364        vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8365        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8366                dev->features |= NETIF_F_IPV6_CSUM;
8367                vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8368        }
8369        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8370        dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8371        vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8372        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8373                dev->features |= NETIF_F_TSO6;
8374                vlan_features_add(dev, NETIF_F_TSO6);
8375        }
8376        if ((rc = register_netdev(dev))) {
8377                dev_err(&pdev->dev, "Cannot register net device\n");
8378                goto error;
8379        }
8380
8381        netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8382                    board_info[ent->driver_data].name,
8383                    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8384                    ((CHIP_ID(bp) & 0x0ff0) >> 4),
8385                    bnx2_bus_string(bp, str),
8386                    dev->base_addr,
8387                    bp->pdev->irq, dev->dev_addr);
8388
8389        return 0;
8390
8391error:
8392        if (bp->mips_firmware)
8393                release_firmware(bp->mips_firmware);
8394        if (bp->rv2p_firmware)
8395                release_firmware(bp->rv2p_firmware);
8396
8397        if (bp->regview)
8398                iounmap(bp->regview);
8399        pci_release_regions(pdev);
8400        pci_disable_device(pdev);
8401        pci_set_drvdata(pdev, NULL);
8402        free_netdev(dev);
8403        return rc;
8404}
8405
/* PCI remove: tear down everything bnx2_init_one() set up. */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* Detach from the stack first; this closes the device if open. */
        unregister_netdev(dev);

        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
                release_firmware(bp->rv2p_firmware);

        if (bp->regview)
                iounmap(bp->regview);

        kfree(bp->temp_stats_blk);

        if (bp->flags & BNX2_FLAG_AER_ENABLED) {
                pci_disable_pcie_error_reporting(pdev);
                bp->flags &= ~BNX2_FLAG_AER_ENABLED;
        }

        /* bp lives inside dev's private area; nothing may touch it after
         * this point.
         */
        free_netdev(dev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
8435
/* Legacy PM suspend: save PCI state, quiesce the device and drop it to
 * the power state chosen for @state.  A closed interface only needs its
 * PCI state saved.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);
        if (!netif_running(dev))
                return 0;

        /* Stop the deferred reset worker before tearing the nic down. */
        cancel_work_sync(&bp->reset_task);
        bnx2_netif_stop(bp, true);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_skbs(bp);
        bnx2_set_power_state(bp, pci_choose_state(pdev, state));
        return 0;
}
8459
/* Legacy PM resume: restore PCI state and, if the interface was up,
 * power the chip back to D0, reinitialize it and restart traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        pci_restore_state(pdev);
        if (!netif_running(dev))
                return 0;

        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        bnx2_init_nic(bp, 1);
        bnx2_netif_start(bp, true);
        return 0;
}
8476
8477/**
8478 * bnx2_io_error_detected - called when PCI error is detected
8479 * @pdev: Pointer to PCI device
8480 * @state: The current pci connection state
8481 *
8482 * This function is called after a PCI bus error affecting
8483 * this device has been detected.
8484 */
8485static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8486                                               pci_channel_state_t state)
8487{
8488        struct net_device *dev = pci_get_drvdata(pdev);
8489        struct bnx2 *bp = netdev_priv(dev);
8490
8491        rtnl_lock();
8492        netif_device_detach(dev);
8493
8494        if (state == pci_channel_io_perm_failure) {
8495                rtnl_unlock();
8496                return PCI_ERS_RESULT_DISCONNECT;
8497        }
8498
8499        if (netif_running(dev)) {
8500                bnx2_netif_stop(bp, true);
8501                del_timer_sync(&bp->timer);
8502                bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8503        }
8504
8505        pci_disable_device(pdev);
8506        rtnl_unlock();
8507
8508        /* Request a slot slot reset. */
8509        return PCI_ERS_RESULT_NEED_RESET;
8510}
8511
8512/**
8513 * bnx2_io_slot_reset - called after the pci bus has been reset.
8514 * @pdev: Pointer to PCI device
8515 *
8516 * Restart the card from scratch, as if from a cold-boot.
8517 */
8518static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8519{
8520        struct net_device *dev = pci_get_drvdata(pdev);
8521        struct bnx2 *bp = netdev_priv(dev);
8522        pci_ers_result_t result;
8523        int err;
8524
8525        rtnl_lock();
8526        if (pci_enable_device(pdev)) {
8527                dev_err(&pdev->dev,
8528                        "Cannot re-enable PCI device after reset\n");
8529                result = PCI_ERS_RESULT_DISCONNECT;
8530        } else {
8531                pci_set_master(pdev);
8532                pci_restore_state(pdev);
8533                pci_save_state(pdev);
8534
8535                if (netif_running(dev)) {
8536                        bnx2_set_power_state(bp, PCI_D0);
8537                        bnx2_init_nic(bp, 1);
8538                }
8539                result = PCI_ERS_RESULT_RECOVERED;
8540        }
8541        rtnl_unlock();
8542
8543        if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8544                return result;
8545
8546        err = pci_cleanup_aer_uncorrect_error_status(pdev);
8547        if (err) {
8548                dev_err(&pdev->dev,
8549                        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8550                         err); /* non-fatal, continue */
8551        }
8552
8553        return result;
8554}
8555
8556/**
8557 * bnx2_io_resume - called when traffic can start flowing again.
8558 * @pdev: Pointer to PCI device
8559 *
8560 * This callback is called when the error recovery driver tells us that
8561 * it's OK to resume normal operation.
8562 */
8563static void bnx2_io_resume(struct pci_dev *pdev)
8564{
8565        struct net_device *dev = pci_get_drvdata(pdev);
8566        struct bnx2 *bp = netdev_priv(dev);
8567
8568        rtnl_lock();
8569        if (netif_running(dev))
8570                bnx2_netif_start(bp, true);
8571
8572        netif_device_attach(dev);
8573        rtnl_unlock();
8574}
8575
/* PCI AER recovery callbacks: detect -> slot reset -> resume.
 * See Documentation/PCI/pci-error-recovery.txt for the state machine.
 */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8581
/* PCI driver descriptor: probe/remove, legacy suspend/resume PM hooks,
 * and the AER error handlers above.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8591
/* Module init: register the driver with the PCI core; device setup
 * happens later in bnx2_init_one() when a matching device is found.
 */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8596
/* Module exit: unregister the driver; the PCI core invokes
 * bnx2_remove_one() for each bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8601
/* Wire the init/exit functions into the module load/unload sequence. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8604
8605
8606
8607