/* linux/drivers/net/bnx2.c */
/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
  11
  12
  13#include <linux/module.h>
  14#include <linux/moduleparam.h>
  15
  16#include <linux/kernel.h>
  17#include <linux/timer.h>
  18#include <linux/errno.h>
  19#include <linux/ioport.h>
  20#include <linux/slab.h>
  21#include <linux/vmalloc.h>
  22#include <linux/interrupt.h>
  23#include <linux/pci.h>
  24#include <linux/init.h>
  25#include <linux/netdevice.h>
  26#include <linux/etherdevice.h>
  27#include <linux/skbuff.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/bitops.h>
  30#include <asm/io.h>
  31#include <asm/irq.h>
  32#include <linux/delay.h>
  33#include <asm/byteorder.h>
  34#include <asm/page.h>
  35#include <linux/time.h>
  36#include <linux/ethtool.h>
  37#include <linux/mii.h>
  38#include <linux/if_vlan.h>
  39#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  40#define BCM_VLAN 1
  41#endif
  42#include <net/ip.h>
  43#include <net/tcp.h>
  44#include <net/checksum.h>
  45#include <linux/workqueue.h>
  46#include <linux/crc32.h>
  47#include <linux/prefetch.h>
  48#include <linux/cache.h>
  49#include <linux/firmware.h>
  50#include <linux/log2.h>
  51#include <linux/list.h>
  52
  53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
  54#define BCM_CNIC 1
  55#include "cnic_if.h"
  56#endif
  57#include "bnx2.h"
  58#include "bnx2_fw.h"
  59
/* Driver identity strings and the firmware image names advertised via
 * the MODULE_FIRMWARE() declarations below.
 */
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "2.0.2"
#define DRV_MODULE_RELDATE      "Aug 21, 2009"
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j3.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j3.fw"
#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: set non-zero to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
  92
/* Supported board types.  The values index board_info[] below (and are
 * stored in the PCI table's driver_data), so the order here must stay
 * in sync with that table.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
 106
/* Human-readable adapter names, indexed by board_t above.
 * Keep entries in the same order as the board_t enumerators.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
 123
/* PCI IDs claimed by this driver.  The HP OEM entries are listed before
 * the PCI_ANY_ID wildcard entries for the same device ID so the more
 * specific subsystem match is preferred.  0x163b/0x163c are the
 * BCM5716/5716S, which have no PCI_DEVICE_ID_NX2_* macro here.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
 149
/* NVRAM interface descriptors for the flash/EEPROM parts that can back
 * the NetXtreme II.  The five leading hex words of each entry are raw
 * hardware configuration values (see struct flash_spec in bnx2.h);
 * the remaining fields are access flags, page geometry, the byte
 * address mask, total size, and a name used for logging.  The values
 * are chip strap/config words — do not edit them without the hardware
 * documentation.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
 238
/* The 5709 uses a single fixed flash configuration rather than an
 * entry selected from flash_table[] above.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 249
 250static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 251{
 252        u32 diff;
 253
 254        smp_mb();
 255
 256        /* The ring uses 256 indices for 255 entries, one of them
 257         * needs to be skipped.
 258         */
 259        diff = txr->tx_prod - txr->tx_cons;
 260        if (unlikely(diff >= TX_DESC_CNT)) {
 261                diff &= 0xffff;
 262                if (diff == TX_DESC_CNT)
 263                        diff = MAX_TX_DESC_CNT;
 264        }
 265        return (bp->tx_ring_size - diff);
 266}
 267
 268static u32
 269bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
 270{
 271        u32 val;
 272
 273        spin_lock_bh(&bp->indirect_lock);
 274        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
 275        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
 276        spin_unlock_bh(&bp->indirect_lock);
 277        return val;
 278}
 279
/* Indirectly write a chip register through the PCI config window.
 * indirect_lock serializes users of the shared address/data pair.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
 288
/* Write a word into the firmware shared-memory region. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
 294
 295static u32
 296bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
 297{
 298        return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
 299}
 300
/* Write @val into on-chip context memory at @cid_addr + @offset.
 * The 5709 goes through the CTX_CTRL request interface and polls
 * briefly for completion; older chips use the direct address/data
 * window.  Statement order is part of the hardware protocol.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait (up to 5 x 5us) for the write request to clear. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
 324
 325#ifdef BCM_CNIC
 326static int
 327bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
 328{
 329        struct bnx2 *bp = netdev_priv(dev);
 330        struct drv_ctl_io *io = &info->data.io;
 331
 332        switch (info->cmd) {
 333        case DRV_CTL_IO_WR_CMD:
 334                bnx2_reg_wr_ind(bp, io->offset, io->data);
 335                break;
 336        case DRV_CTL_IO_RD_CMD:
 337                io->data = bnx2_reg_rd_ind(bp, io->offset);
 338                break;
 339        case DRV_CTL_CTX_WR_CMD:
 340                bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
 341                break;
 342        default:
 343                return -EINVAL;
 344        }
 345        return 0;
 346}
 347
 348static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
 349{
 350        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 351        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 352        int sb_id;
 353
 354        if (bp->flags & BNX2_FLAG_USING_MSIX) {
 355                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
 356                bnapi->cnic_present = 0;
 357                sb_id = bp->irq_nvecs;
 358                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
 359        } else {
 360                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
 361                bnapi->cnic_tag = bnapi->last_status_idx;
 362                bnapi->cnic_present = 1;
 363                sb_id = 0;
 364                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
 365        }
 366
 367        cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
 368        cp->irq_arr[0].status_blk = (void *)
 369                ((unsigned long) bnapi->status_blk.msi +
 370                (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
 371        cp->irq_arr[0].status_blk_num = sb_id;
 372        cp->num_irq = 1;
 373}
 374
 375static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 376                              void *data)
 377{
 378        struct bnx2 *bp = netdev_priv(dev);
 379        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 380
 381        if (ops == NULL)
 382                return -EINVAL;
 383
 384        if (cp->drv_state & CNIC_DRV_STATE_REGD)
 385                return -EBUSY;
 386
 387        bp->cnic_data = data;
 388        rcu_assign_pointer(bp->cnic_ops, ops);
 389
 390        cp->num_irq = 0;
 391        cp->drv_state = CNIC_DRV_STATE_REGD;
 392
 393        bnx2_setup_cnic_irq_info(bp);
 394
 395        return 0;
 396}
 397
 398static int bnx2_unregister_cnic(struct net_device *dev)
 399{
 400        struct bnx2 *bp = netdev_priv(dev);
 401        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 402        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 403
 404        mutex_lock(&bp->cnic_lock);
 405        cp->drv_state = 0;
 406        bnapi->cnic_present = 0;
 407        rcu_assign_pointer(bp->cnic_ops, NULL);
 408        mutex_unlock(&bp->cnic_lock);
 409        synchronize_rcu();
 410        return 0;
 411}
 412
 413struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
 414{
 415        struct bnx2 *bp = netdev_priv(dev);
 416        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 417
 418        cp->drv_owner = THIS_MODULE;
 419        cp->chip_id = bp->chip_id;
 420        cp->pdev = bp->pdev;
 421        cp->io_base = bp->regview;
 422        cp->drv_ctl = bnx2_drv_ctl;
 423        cp->drv_register_cnic = bnx2_register_cnic;
 424        cp->drv_unregister_cnic = bnx2_unregister_cnic;
 425
 426        return cp;
 427}
 428EXPORT_SYMBOL(bnx2_cnic_probe);
 429
 430static void
 431bnx2_cnic_stop(struct bnx2 *bp)
 432{
 433        struct cnic_ops *c_ops;
 434        struct cnic_ctl_info info;
 435
 436        mutex_lock(&bp->cnic_lock);
 437        c_ops = bp->cnic_ops;
 438        if (c_ops) {
 439                info.cmd = CNIC_CTL_STOP_CMD;
 440                c_ops->cnic_ctl(bp->cnic_data, &info);
 441        }
 442        mutex_unlock(&bp->cnic_lock);
 443}
 444
 445static void
 446bnx2_cnic_start(struct bnx2 *bp)
 447{
 448        struct cnic_ops *c_ops;
 449        struct cnic_ctl_info info;
 450
 451        mutex_lock(&bp->cnic_lock);
 452        c_ops = bp->cnic_ops;
 453        if (c_ops) {
 454                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 455                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 456
 457                        bnapi->cnic_tag = bnapi->last_status_idx;
 458                }
 459                info.cmd = CNIC_CTL_START_CMD;
 460                c_ops->cnic_ctl(bp->cnic_data, &info);
 461        }
 462        mutex_unlock(&bp->cnic_lock);
 463}
 464
 465#else
 466
/* CNIC support compiled out: no-op stub. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
 471
/* CNIC support compiled out: no-op stub. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
 476
 477#endif
 478
/* Read PHY register @reg over the EMAC MDIO interface.
 * On success returns 0 with the 16-bit value in *val; returns -EBUSY
 * (and *val == 0) if the interface never clears its busy bit.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	/* Hardware auto-polling owns the MDIO bus; disable it first. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Issue the read command: PHY address, register, start/busy. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us (50 x 10us) for the busy bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read and keep only the data field. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
 535
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 * Returns 0 on success or -EBUSY if the interface never clears its
 * busy bit.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	/* Hardware auto-polling owns the MDIO bus; disable it first. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Issue the write command with the data OR'ed into the word. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us (50 x 10us) for the busy bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
 584
 585static void
 586bnx2_disable_int(struct bnx2 *bp)
 587{
 588        int i;
 589        struct bnx2_napi *bnapi;
 590
 591        for (i = 0; i < bp->irq_nvecs; i++) {
 592                bnapi = &bp->bnx2_napi[i];
 593                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 594                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
 595        }
 596        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
 597}
 598
 599static void
 600bnx2_enable_int(struct bnx2 *bp)
 601{
 602        int i;
 603        struct bnx2_napi *bnapi;
 604
 605        for (i = 0; i < bp->irq_nvecs; i++) {
 606                bnapi = &bp->bnx2_napi[i];
 607
 608                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 609                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 610                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
 611                       bnapi->last_status_idx);
 612
 613                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 614                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 615                       bnapi->last_status_idx);
 616        }
 617        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
 618}
 619
/* Disable interrupts and wait for any in-flight handlers to finish.
 * intr_sem is raised unconditionally (even when the device is down)
 * so that bnx2_netif_start() decrements stay balanced.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
 633
 634static void
 635bnx2_napi_disable(struct bnx2 *bp)
 636{
 637        int i;
 638
 639        for (i = 0; i < bp->irq_nvecs; i++)
 640                napi_disable(&bp->bnx2_napi[i].napi);
 641}
 642
 643static void
 644bnx2_napi_enable(struct bnx2 *bp)
 645{
 646        int i;
 647
 648        for (i = 0; i < bp->irq_nvecs; i++)
 649                napi_enable(&bp->bnx2_napi[i].napi);
 650}
 651
 652static void
 653bnx2_netif_stop(struct bnx2 *bp)
 654{
 655        bnx2_cnic_stop(bp);
 656        bnx2_disable_int_sync(bp);
 657        if (netif_running(bp->dev)) {
 658                bnx2_napi_disable(bp);
 659                netif_tx_disable(bp->dev);
 660                bp->dev->trans_start = jiffies; /* prevent tx timeout */
 661        }
 662}
 663
 664static void
 665bnx2_netif_start(struct bnx2 *bp)
 666{
 667        if (atomic_dec_and_test(&bp->intr_sem)) {
 668                if (netif_running(bp->dev)) {
 669                        netif_tx_wake_all_queues(bp->dev);
 670                        bnx2_napi_enable(bp);
 671                        bnx2_enable_int(bp);
 672                        bnx2_cnic_start(bp);
 673                }
 674        }
 675}
 676
 677static void
 678bnx2_free_tx_mem(struct bnx2 *bp)
 679{
 680        int i;
 681
 682        for (i = 0; i < bp->num_tx_rings; i++) {
 683                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 684                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 685
 686                if (txr->tx_desc_ring) {
 687                        pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
 688                                            txr->tx_desc_ring,
 689                                            txr->tx_desc_mapping);
 690                        txr->tx_desc_ring = NULL;
 691                }
 692                kfree(txr->tx_buf_ring);
 693                txr->tx_buf_ring = NULL;
 694        }
 695}
 696
 697static void
 698bnx2_free_rx_mem(struct bnx2 *bp)
 699{
 700        int i;
 701
 702        for (i = 0; i < bp->num_rx_rings; i++) {
 703                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 704                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 705                int j;
 706
 707                for (j = 0; j < bp->rx_max_ring; j++) {
 708                        if (rxr->rx_desc_ring[j])
 709                                pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
 710                                                    rxr->rx_desc_ring[j],
 711                                                    rxr->rx_desc_mapping[j]);
 712                        rxr->rx_desc_ring[j] = NULL;
 713                }
 714                vfree(rxr->rx_buf_ring);
 715                rxr->rx_buf_ring = NULL;
 716
 717                for (j = 0; j < bp->rx_max_pg_ring; j++) {
 718                        if (rxr->rx_pg_desc_ring[j])
 719                                pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
 720                                                    rxr->rx_pg_desc_ring[j],
 721                                                    rxr->rx_pg_desc_mapping[j]);
 722                        rxr->rx_pg_desc_ring[j] = NULL;
 723                }
 724                vfree(rxr->rx_pg_ring);
 725                rxr->rx_pg_ring = NULL;
 726        }
 727}
 728
 729static int
 730bnx2_alloc_tx_mem(struct bnx2 *bp)
 731{
 732        int i;
 733
 734        for (i = 0; i < bp->num_tx_rings; i++) {
 735                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 736                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 737
 738                txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
 739                if (txr->tx_buf_ring == NULL)
 740                        return -ENOMEM;
 741
 742                txr->tx_desc_ring =
 743                        pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
 744                                             &txr->tx_desc_mapping);
 745                if (txr->tx_desc_ring == NULL)
 746                        return -ENOMEM;
 747        }
 748        return 0;
 749}
 750
 751static int
 752bnx2_alloc_rx_mem(struct bnx2 *bp)
 753{
 754        int i;
 755
 756        for (i = 0; i < bp->num_rx_rings; i++) {
 757                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 758                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 759                int j;
 760
 761                rxr->rx_buf_ring =
 762                        vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
 763                if (rxr->rx_buf_ring == NULL)
 764                        return -ENOMEM;
 765
 766                memset(rxr->rx_buf_ring, 0,
 767                       SW_RXBD_RING_SIZE * bp->rx_max_ring);
 768
 769                for (j = 0; j < bp->rx_max_ring; j++) {
 770                        rxr->rx_desc_ring[j] =
 771                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
 772                                                     &rxr->rx_desc_mapping[j]);
 773                        if (rxr->rx_desc_ring[j] == NULL)
 774                                return -ENOMEM;
 775
 776                }
 777
 778                if (bp->rx_pg_ring_size) {
 779                        rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
 780                                                  bp->rx_max_pg_ring);
 781                        if (rxr->rx_pg_ring == NULL)
 782                                return -ENOMEM;
 783
 784                        memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
 785                               bp->rx_max_pg_ring);
 786                }
 787
 788                for (j = 0; j < bp->rx_max_pg_ring; j++) {
 789                        rxr->rx_pg_desc_ring[j] =
 790                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
 791                                                &rxr->rx_pg_desc_mapping[j]);
 792                        if (rxr->rx_pg_desc_ring[j] == NULL)
 793                                return -ENOMEM;
 794
 795                }
 796        }
 797        return 0;
 798}
 799
 800static void
 801bnx2_free_mem(struct bnx2 *bp)
 802{
 803        int i;
 804        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 805
 806        bnx2_free_tx_mem(bp);
 807        bnx2_free_rx_mem(bp);
 808
 809        for (i = 0; i < bp->ctx_pages; i++) {
 810                if (bp->ctx_blk[i]) {
 811                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
 812                                            bp->ctx_blk[i],
 813                                            bp->ctx_blk_mapping[i]);
 814                        bp->ctx_blk[i] = NULL;
 815                }
 816        }
 817        if (bnapi->status_blk.msi) {
 818                pci_free_consistent(bp->pdev, bp->status_stats_size,
 819                                    bnapi->status_blk.msi,
 820                                    bp->status_blk_mapping);
 821                bnapi->status_blk.msi = NULL;
 822                bp->stats_blk = NULL;
 823        }
 824}
 825
/* Allocate all host memory the device needs: the combined status +
 * statistics block, 5709 context pages, and the RX/TX rings.
 * Returns 0 on success or -ENOMEM after freeing partial allocations.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* One aligned status block slot per hardware MSI-X vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block... */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* ...the remaining vectors get per-vector MSI-X blocks
		 * at fixed offsets within the same allocation.
		 */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* The statistics block follows the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8KB of host context memory, split into chip pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
 902
 903static void
 904bnx2_report_fw_link(struct bnx2 *bp)
 905{
 906        u32 fw_link_status = 0;
 907
 908        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
 909                return;
 910
 911        if (bp->link_up) {
 912                u32 bmsr;
 913
 914                switch (bp->line_speed) {
 915                case SPEED_10:
 916                        if (bp->duplex == DUPLEX_HALF)
 917                                fw_link_status = BNX2_LINK_STATUS_10HALF;
 918                        else
 919                                fw_link_status = BNX2_LINK_STATUS_10FULL;
 920                        break;
 921                case SPEED_100:
 922                        if (bp->duplex == DUPLEX_HALF)
 923                                fw_link_status = BNX2_LINK_STATUS_100HALF;
 924                        else
 925                                fw_link_status = BNX2_LINK_STATUS_100FULL;
 926                        break;
 927                case SPEED_1000:
 928                        if (bp->duplex == DUPLEX_HALF)
 929                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
 930                        else
 931                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
 932                        break;
 933                case SPEED_2500:
 934                        if (bp->duplex == DUPLEX_HALF)
 935                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
 936                        else
 937                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
 938                        break;
 939                }
 940
 941                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
 942
 943                if (bp->autoneg) {
 944                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
 945
 946                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
 947                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
 948
 949                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
 950                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
 951                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
 952                        else
 953                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
 954                }
 955        }
 956        else
 957                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
 958
 959        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
 960}
 961
 962static char *
 963bnx2_xceiver_str(struct bnx2 *bp)
 964{
 965        return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
 966                ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
 967                 "Copper"));
 968}
 969
 970static void
 971bnx2_report_link(struct bnx2 *bp)
 972{
 973        if (bp->link_up) {
 974                netif_carrier_on(bp->dev);
 975                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
 976                       bnx2_xceiver_str(bp));
 977
 978                printk("%d Mbps ", bp->line_speed);
 979
 980                if (bp->duplex == DUPLEX_FULL)
 981                        printk("full duplex");
 982                else
 983                        printk("half duplex");
 984
 985                if (bp->flow_ctrl) {
 986                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
 987                                printk(", receive ");
 988                                if (bp->flow_ctrl & FLOW_CTRL_TX)
 989                                        printk("& transmit ");
 990                        }
 991                        else {
 992                                printk(", transmit ");
 993                        }
 994                        printk("flow control ON");
 995                }
 996                printk("\n");
 997        }
 998        else {
 999                netif_carrier_off(bp->dev);
1000                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
1001                       bnx2_xceiver_str(bp));
1002        }
1003
1004        bnx2_report_fw_link(bp);
1005}
1006
/* Resolve the effective RX/TX pause configuration after link comes up,
 * either from the forced settings or from the autoneg advertisements.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If either speed or flow-control autoneg is off, use the
	 * requested settings directly (pause only makes sense at
	 * full duplex).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes reports the resolved pause result directly in a
	 * status register; no need to run the resolution table.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* 1000BASE-X uses different pause bit positions than copper;
	 * translate both sides to the copper encoding so the common
	 * resolution logic below applies.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1082
1083static int
1084bnx2_5709s_linkup(struct bnx2 *bp)
1085{
1086        u32 val, speed;
1087
1088        bp->link_up = 1;
1089
1090        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1091        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1092        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093
1094        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1095                bp->line_speed = bp->req_line_speed;
1096                bp->duplex = bp->req_duplex;
1097                return 0;
1098        }
1099        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1100        switch (speed) {
1101                case MII_BNX2_GP_TOP_AN_SPEED_10:
1102                        bp->line_speed = SPEED_10;
1103                        break;
1104                case MII_BNX2_GP_TOP_AN_SPEED_100:
1105                        bp->line_speed = SPEED_100;
1106                        break;
1107                case MII_BNX2_GP_TOP_AN_SPEED_1G:
1108                case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1109                        bp->line_speed = SPEED_1000;
1110                        break;
1111                case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1112                        bp->line_speed = SPEED_2500;
1113                        break;
1114        }
1115        if (val & MII_BNX2_GP_TOP_AN_FD)
1116                bp->duplex = DUPLEX_FULL;
1117        else
1118                bp->duplex = DUPLEX_HALF;
1119        return 0;
1120}
1121
1122static int
1123bnx2_5708s_linkup(struct bnx2 *bp)
1124{
1125        u32 val;
1126
1127        bp->link_up = 1;
1128        bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1129        switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1130                case BCM5708S_1000X_STAT1_SPEED_10:
1131                        bp->line_speed = SPEED_10;
1132                        break;
1133                case BCM5708S_1000X_STAT1_SPEED_100:
1134                        bp->line_speed = SPEED_100;
1135                        break;
1136                case BCM5708S_1000X_STAT1_SPEED_1G:
1137                        bp->line_speed = SPEED_1000;
1138                        break;
1139                case BCM5708S_1000X_STAT1_SPEED_2G5:
1140                        bp->line_speed = SPEED_2500;
1141                        break;
1142        }
1143        if (val & BCM5708S_1000X_STAT1_FD)
1144                bp->duplex = DUPLEX_FULL;
1145        else
1146                bp->duplex = DUPLEX_HALF;
1147
1148        return 0;
1149}
1150
1151static int
1152bnx2_5706s_linkup(struct bnx2 *bp)
1153{
1154        u32 bmcr, local_adv, remote_adv, common;
1155
1156        bp->link_up = 1;
1157        bp->line_speed = SPEED_1000;
1158
1159        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1160        if (bmcr & BMCR_FULLDPLX) {
1161                bp->duplex = DUPLEX_FULL;
1162        }
1163        else {
1164                bp->duplex = DUPLEX_HALF;
1165        }
1166
1167        if (!(bmcr & BMCR_ANENABLE)) {
1168                return 0;
1169        }
1170
1171        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1172        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1173
1174        common = local_adv & remote_adv;
1175        if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1176
1177                if (common & ADVERTISE_1000XFULL) {
1178                        bp->duplex = DUPLEX_FULL;
1179                }
1180                else {
1181                        bp->duplex = DUPLEX_HALF;
1182                }
1183        }
1184
1185        return 0;
1186}
1187
1188static int
1189bnx2_copper_linkup(struct bnx2 *bp)
1190{
1191        u32 bmcr;
1192
1193        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1194        if (bmcr & BMCR_ANENABLE) {
1195                u32 local_adv, remote_adv, common;
1196
1197                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1198                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1199
1200                common = local_adv & (remote_adv >> 2);
1201                if (common & ADVERTISE_1000FULL) {
1202                        bp->line_speed = SPEED_1000;
1203                        bp->duplex = DUPLEX_FULL;
1204                }
1205                else if (common & ADVERTISE_1000HALF) {
1206                        bp->line_speed = SPEED_1000;
1207                        bp->duplex = DUPLEX_HALF;
1208                }
1209                else {
1210                        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1211                        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1212
1213                        common = local_adv & remote_adv;
1214                        if (common & ADVERTISE_100FULL) {
1215                                bp->line_speed = SPEED_100;
1216                                bp->duplex = DUPLEX_FULL;
1217                        }
1218                        else if (common & ADVERTISE_100HALF) {
1219                                bp->line_speed = SPEED_100;
1220                                bp->duplex = DUPLEX_HALF;
1221                        }
1222                        else if (common & ADVERTISE_10FULL) {
1223                                bp->line_speed = SPEED_10;
1224                                bp->duplex = DUPLEX_FULL;
1225                        }
1226                        else if (common & ADVERTISE_10HALF) {
1227                                bp->line_speed = SPEED_10;
1228                                bp->duplex = DUPLEX_HALF;
1229                        }
1230                        else {
1231                                bp->line_speed = 0;
1232                                bp->link_up = 0;
1233                        }
1234                }
1235        }
1236        else {
1237                if (bmcr & BMCR_SPEED100) {
1238                        bp->line_speed = SPEED_100;
1239                }
1240                else {
1241                        bp->line_speed = SPEED_10;
1242                }
1243                if (bmcr & BMCR_FULLDPLX) {
1244                        bp->duplex = DUPLEX_FULL;
1245                }
1246                else {
1247                        bp->duplex = DUPLEX_HALF;
1248                }
1249        }
1250
1251        return 0;
1252}
1253
/* Program the L2 RX context for one ring, including the pause
 * watermarks on the 5709.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Low watermark triggers pause frames; disable it when
		 * TX flow control is off.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		/* Watermarks must stay ordered: lo < hi. */
		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water field is only 4 bits wide. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1289
1290static void
1291bnx2_init_all_rx_contexts(struct bnx2 *bp)
1292{
1293        int i;
1294        u32 cid;
1295
1296        for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1297                if (i == 1)
1298                        cid = RX_RSS_CID;
1299                bnx2_init_rx_context(bp, cid);
1300        }
1301}
1302
/* Program the EMAC to match the resolved link speed, duplex and
 * flow-control settings.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default inter-packet gap; widened for 1G half duplex below. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than 5706 have a
				 * dedicated 10M MII mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 RX contexts hold flow-control watermarks; refresh them. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1370
1371static void
1372bnx2_enable_bmsr1(struct bnx2 *bp)
1373{
1374        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1375            (CHIP_NUM(bp) == CHIP_NUM_5709))
1376                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1377                               MII_BNX2_BLK_ADDR_GP_STATUS);
1378}
1379
1380static void
1381bnx2_disable_bmsr1(struct bnx2 *bp)
1382{
1383        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1384            (CHIP_NUM(bp) == CHIP_NUM_5709))
1385                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1386                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1387}
1388
1389static int
1390bnx2_test_and_enable_2g5(struct bnx2 *bp)
1391{
1392        u32 up1;
1393        int ret = 1;
1394
1395        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1396                return 0;
1397
1398        if (bp->autoneg & AUTONEG_SPEED)
1399                bp->advertising |= ADVERTISED_2500baseX_Full;
1400
1401        if (CHIP_NUM(bp) == CHIP_NUM_5709)
1402                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1403
1404        bnx2_read_phy(bp, bp->mii_up1, &up1);
1405        if (!(up1 & BCM5708S_UP1_2G5)) {
1406                up1 |= BCM5708S_UP1_2G5;
1407                bnx2_write_phy(bp, bp->mii_up1, up1);
1408                ret = 0;
1409        }
1410
1411        if (CHIP_NUM(bp) == CHIP_NUM_5709)
1412                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1413                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1414
1415        return ret;
1416}
1417
1418static int
1419bnx2_test_and_disable_2g5(struct bnx2 *bp)
1420{
1421        u32 up1;
1422        int ret = 0;
1423
1424        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1425                return 0;
1426
1427        if (CHIP_NUM(bp) == CHIP_NUM_5709)
1428                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1429
1430        bnx2_read_phy(bp, bp->mii_up1, &up1);
1431        if (up1 & BCM5708S_UP1_2G5) {
1432                up1 &= ~BCM5708S_UP1_2G5;
1433                bnx2_write_phy(bp, bp->mii_up1, up1);
1434                ret = 1;
1435        }
1436
1437        if (CHIP_NUM(bp) == CHIP_NUM_5709)
1438                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1439                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1440
1441        return ret;
1442}
1443
1444static void
1445bnx2_enable_forced_2g5(struct bnx2 *bp)
1446{
1447        u32 bmcr;
1448
1449        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1450                return;
1451
1452        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1453                u32 val;
1454
1455                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1456                               MII_BNX2_BLK_ADDR_SERDES_DIG);
1457                bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1458                val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1459                val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1460                bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1461
1462                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1463                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1464                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1465
1466        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1467                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1468                bmcr |= BCM5708S_BMCR_FORCE_2500;
1469        }
1470
1471        if (bp->autoneg & AUTONEG_SPEED) {
1472                bmcr &= ~BMCR_ANENABLE;
1473                if (bp->req_duplex == DUPLEX_FULL)
1474                        bmcr |= BMCR_FULLDPLX;
1475        }
1476        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1477}
1478
1479static void
1480bnx2_disable_forced_2g5(struct bnx2 *bp)
1481{
1482        u32 bmcr;
1483
1484        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1485                return;
1486
1487        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1488                u32 val;
1489
1490                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1491                               MII_BNX2_BLK_ADDR_SERDES_DIG);
1492                bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1493                val &= ~MII_BNX2_SD_MISC1_FORCE;
1494                bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1495
1496                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1497                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1498                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1499
1500        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1501                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1502                bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1503        }
1504
1505        if (bp->autoneg & AUTONEG_SPEED)
1506                bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1507        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1508}
1509
1510static void
1511bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1512{
1513        u32 val;
1514
1515        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1516        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1517        if (start)
1518                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1519        else
1520                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1521}
1522
/* Re-evaluate the PHY link state and reconfigure the MAC to match.
 * Caller holds bp->phy_lock.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* A remote (firmware-managed) PHY reports link asynchronously. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR link-down is latched; read twice for current status.
	 * On 5709 SerDes the register is in another bank, hence the
	 * enable/disable bracket.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive link from EMAC status and the
	 * autoneg debug shadow register instead of trusting BMSR.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Shadow register is latched too; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Chip-specific resolution of speed/duplex. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: drop any forced 2.5G mode and re-enable
		 * autoneg if we had fallen back to parallel detect.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log and notify firmware on an actual transition. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1606
1607static int
1608bnx2_reset_phy(struct bnx2 *bp)
1609{
1610        int i;
1611        u32 reg;
1612
1613        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1614
1615#define PHY_RESET_MAX_WAIT 100
1616        for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1617                udelay(10);
1618
1619                bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1620                if (!(reg & BMCR_RESET)) {
1621                        udelay(20);
1622                        break;
1623                }
1624        }
1625        if (i == PHY_RESET_MAX_WAIT) {
1626                return -EBUSY;
1627        }
1628        return 0;
1629}
1630
1631static u32
1632bnx2_phy_get_pause_adv(struct bnx2 *bp)
1633{
1634        u32 adv = 0;
1635
1636        if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1637                (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1638
1639                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1640                        adv = ADVERTISE_1000XPAUSE;
1641                }
1642                else {
1643                        adv = ADVERTISE_PAUSE_CAP;
1644                }
1645        }
1646        else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1647                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1648                        adv = ADVERTISE_1000XPSE_ASYM;
1649                }
1650                else {
1651                        adv = ADVERTISE_PAUSE_ASYM;
1652                }
1653        }
1654        else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1655                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1656                        adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1657                }
1658                else {
1659                        adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1660                }
1661        }
1662        return adv;
1663}
1664
1665static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1666
1667static int
1668bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1669__releases(&bp->phy_lock)
1670__acquires(&bp->phy_lock)
1671{
1672        u32 speed_arg = 0, pause_adv;
1673
1674        pause_adv = bnx2_phy_get_pause_adv(bp);
1675
1676        if (bp->autoneg & AUTONEG_SPEED) {
1677                speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1678                if (bp->advertising & ADVERTISED_10baseT_Half)
1679                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1680                if (bp->advertising & ADVERTISED_10baseT_Full)
1681                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1682                if (bp->advertising & ADVERTISED_100baseT_Half)
1683                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1684                if (bp->advertising & ADVERTISED_100baseT_Full)
1685                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1686                if (bp->advertising & ADVERTISED_1000baseT_Full)
1687                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1688                if (bp->advertising & ADVERTISED_2500baseX_Full)
1689                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1690        } else {
1691                if (bp->req_line_speed == SPEED_2500)
1692                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1693                else if (bp->req_line_speed == SPEED_1000)
1694                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1695                else if (bp->req_line_speed == SPEED_100) {
1696                        if (bp->req_duplex == DUPLEX_FULL)
1697                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1698                        else
1699                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1700                } else if (bp->req_line_speed == SPEED_10) {
1701                        if (bp->req_duplex == DUPLEX_FULL)
1702                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1703                        else
1704                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1705                }
1706        }
1707
1708        if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1709                speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1710        if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1711                speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1712
1713        if (port == PORT_TP)
1714                speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1715                             BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1716
1717        bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1718
1719        spin_unlock_bh(&bp->phy_lock);
1720        bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1721        spin_lock_bh(&bp->phy_lock);
1722
1723        return 0;
1724}
1725
/* Configure the SerDes PHY for forced speed/duplex or autonegotiation.
 *
 * Must be called with bp->phy_lock held; the lock is dropped and
 * re-acquired around the msleep() used to make a link-down visible to
 * the link partner (hence the __releases/__acquires annotations).
 *
 * Returns 0 (or the result of bnx2_setup_remote_phy() when the PHY is
 * managed by the firmware).
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed PHYs are configured via the shmem mailbox. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling 2.5G capability may require forcing the link
		 * down so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of the forced 2.5G mode bits. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 appears to be the
				 * 5709 BMCR speed-select bit cleared when
				 * forcing 1G — confirm against chip docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees link drop,
				 * then apply the forced settings.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1842
/* Full set of fibre link modes to advertise; includes 2.5G only when the
 * PHY is 2.5G-capable.  Expands to an expression that reads `bp` from the
 * enclosing scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* Full set of copper link modes to advertise (ethtool ADVERTISED_* bits). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 modes (plus CSMA). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both 1G duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1857
1858static void
1859bnx2_set_default_remote_link(struct bnx2 *bp)
1860{
1861        u32 link;
1862
1863        if (bp->phy_port == PORT_TP)
1864                link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1865        else
1866                link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1867
1868        if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1869                bp->req_line_speed = 0;
1870                bp->autoneg |= AUTONEG_SPEED;
1871                bp->advertising = ADVERTISED_Autoneg;
1872                if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1873                        bp->advertising |= ADVERTISED_10baseT_Half;
1874                if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1875                        bp->advertising |= ADVERTISED_10baseT_Full;
1876                if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1877                        bp->advertising |= ADVERTISED_100baseT_Half;
1878                if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1879                        bp->advertising |= ADVERTISED_100baseT_Full;
1880                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1881                        bp->advertising |= ADVERTISED_1000baseT_Full;
1882                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1883                        bp->advertising |= ADVERTISED_2500baseX_Full;
1884        } else {
1885                bp->autoneg = 0;
1886                bp->advertising = 0;
1887                bp->req_duplex = DUPLEX_FULL;
1888                if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1889                        bp->req_line_speed = SPEED_10;
1890                        if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1891                                bp->req_duplex = DUPLEX_HALF;
1892                }
1893                if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1894                        bp->req_line_speed = SPEED_100;
1895                        if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1896                                bp->req_duplex = DUPLEX_HALF;
1897                }
1898                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1899                        bp->req_line_speed = SPEED_1000;
1900                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1901                        bp->req_line_speed = SPEED_2500;
1902        }
1903}
1904
1905static void
1906bnx2_set_default_link(struct bnx2 *bp)
1907{
1908        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1909                bnx2_set_default_remote_link(bp);
1910                return;
1911        }
1912
1913        bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1914        bp->req_line_speed = 0;
1915        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1916                u32 reg;
1917
1918                bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1919
1920                reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1921                reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1922                if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1923                        bp->autoneg = 0;
1924                        bp->req_line_speed = bp->line_speed = SPEED_1000;
1925                        bp->req_duplex = DUPLEX_FULL;
1926                }
1927        } else
1928                bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1929}
1930
/* Advance the driver pulse sequence number and post it to the firmware
 * pulse mailbox so the bootcode knows the driver is alive.
 *
 * The shmem word is written directly through the PCICFG register window
 * (rather than bnx2_shmem_wr()), serialized by indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1944
/* Handle a link-status event from a firmware-managed (remote) PHY.
 *
 * Reads the firmware's link status word, updates bp->link_up, line
 * speed, duplex, flow control and PHY port type accordingly, reports
 * any link-state change and reprograms the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, for change detection */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware asks for a heartbeat when its timer expired. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case sets half duplex and intentionally
		 * falls through to the matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the forced setting unless both speed
		 * and flow control are autonegotiated, in which case take
		 * the negotiated result from the firmware status word.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change (fibre <-> TP) resets link defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2021
2022static int
2023bnx2_set_remote_link(struct bnx2 *bp)
2024{
2025        u32 evt_code;
2026
2027        evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2028        switch (evt_code) {
2029                case BNX2_FW_EVT_CODE_LINK_EVENT:
2030                        bnx2_remote_phy_event(bp);
2031                        break;
2032                case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2033                default:
2034                        bnx2_send_heart_beat(bp);
2035                        break;
2036        }
2037        return 0;
2038}
2039
/* Configure the copper PHY for autonegotiation or forced speed/duplex.
 *
 * Called with bp->phy_lock held; drops and re-acquires it around the
 * msleep() used to force the link down when changing forced settings.
 * Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Build the desired 10/100 and 1000BASE-T advertisement
		 * registers from bp->advertising, then only restart
		 * autoneg if something actually changed.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2138
2139static int
2140bnx2_setup_phy(struct bnx2 *bp, u8 port)
2141__releases(&bp->phy_lock)
2142__acquires(&bp->phy_lock)
2143{
2144        if (bp->loopback == MAC_LOOPBACK)
2145                return 0;
2146
2147        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2148                return (bnx2_setup_serdes_phy(bp, port));
2149        }
2150        else {
2151                return (bnx2_setup_copper_phy(bp));
2152        }
2153}
2154
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709 uses a block-addressed register map, so the standard MII
 * register offsets are remapped (+0x10) and each configuration step
 * first selects the appropriate register block via MII_BNX2_BLK_ADDR.
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Remapped MII register offsets for this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route register accesses to the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fibre mode, disable auto media detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise (or not) 2.5G depending on PHY capability. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2204
/* Initialize the 5708 SerDes PHY: fibre mode, optional 2.5G capability,
 * early-revision TX amplitude workaround and NVRAM-specified TX control.
 * Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fibre mode with auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Enable 2.5G advertisement when the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from NVRAM, but only
	 * on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2262
/* Initialize the 5706 SerDes PHY.  Programs vendor shadow registers
 * (0x18/0x1c) differently for jumbo vs. standard MTU.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): 0x300 in MISC_GP_HW_CTL0 is a 5706-specific
	 * hardware setting — meaning not documented here.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2300
/* Initialize the copper PHY: optional CRC and early-DAC workarounds,
 * extended packet length for jumbo MTU, and ethernet@wirespeed.
 * Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor workaround sequence via shadow registers 0x15/0x17/0x18
	 * for PHYs flagged with the CRC erratum.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via DSP expansion register bit 8. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2352
2353
2354static int
2355bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2356__releases(&bp->phy_lock)
2357__acquires(&bp->phy_lock)
2358{
2359        u32 val;
2360        int rc = 0;
2361
2362        bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2363        bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2364
2365        bp->mii_bmcr = MII_BMCR;
2366        bp->mii_bmsr = MII_BMSR;
2367        bp->mii_bmsr1 = MII_BMSR;
2368        bp->mii_adv = MII_ADVERTISE;
2369        bp->mii_lpa = MII_LPA;
2370
2371        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2372
2373        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2374                goto setup_phy;
2375
2376        bnx2_read_phy(bp, MII_PHYSID1, &val);
2377        bp->phy_id = val << 16;
2378        bnx2_read_phy(bp, MII_PHYSID2, &val);
2379        bp->phy_id |= val & 0xffff;
2380
2381        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2382                if (CHIP_NUM(bp) == CHIP_NUM_5706)
2383                        rc = bnx2_init_5706s_phy(bp, reset_phy);
2384                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2385                        rc = bnx2_init_5708s_phy(bp, reset_phy);
2386                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2387                        rc = bnx2_init_5709s_phy(bp, reset_phy);
2388        }
2389        else {
2390                rc = bnx2_init_copper_phy(bp, reset_phy);
2391        }
2392
2393setup_phy:
2394        if (!rc)
2395                rc = bnx2_setup_phy(bp, bp->phy_port);
2396
2397        return rc;
2398}
2399
2400static int
2401bnx2_set_mac_loopback(struct bnx2 *bp)
2402{
2403        u32 mac_mode;
2404
2405        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2406        mac_mode &= ~BNX2_EMAC_MODE_PORT;
2407        mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2408        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2409        bp->link_up = 1;
2410        return 0;
2411}
2412
2413static int bnx2_test_link(struct bnx2 *);
2414
2415static int
2416bnx2_set_phy_loopback(struct bnx2 *bp)
2417{
2418        u32 mac_mode;
2419        int rc, i;
2420
2421        spin_lock_bh(&bp->phy_lock);
2422        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2423                            BMCR_SPEED1000);
2424        spin_unlock_bh(&bp->phy_lock);
2425        if (rc)
2426                return rc;
2427
2428        for (i = 0; i < 10; i++) {
2429                if (bnx2_test_link(bp) == 0)
2430                        break;
2431                msleep(100);
2432        }
2433
2434        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2435        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2436                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2437                      BNX2_EMAC_MODE_25G_MODE);
2438
2439        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2440        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2441        bp->link_up = 1;
2442        return 0;
2443}
2444
/* Post a message to the bootcode via the driver mailbox and optionally
 * wait for its acknowledgement.
 *
 * @msg_data: message code/data; the driver sequence number is OR'ed in.
 * @ack:      non-zero to poll for the firmware ACK.
 * @silent:   non-zero to suppress the timeout printk.
 *
 * Returns 0 on success, -EBUSY if the firmware did not ACK within
 * BNX2_FW_ACK_TIME_OUT_MS, -EIO if it ACKed with a non-OK status.
 * WAIT0 messages always return 0 once the ACK poll finishes.
 * Sleeps; must not be called in atomic context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* ACK matches when firmware echoes our sequence number. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2490
/* Initialize the 5709 on-chip context memory.
 *
 * Kicks off the hardware MEM_INIT, then programs the host page table
 * with the DMA address of each pre-allocated context block, polling for
 * each page-table write to complete.
 *
 * Returns 0 on success, -EBUSY if MEM_INIT or a page-table write times
 * out, -ENOMEM if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): bit (1 << 12) is an undocumented-here CTX_COMMAND
	 * flag set alongside ENABLED/MEM_INIT; page size is encoded in
	 * bits 16+ as (BCM_PAGE_BITS - 8).
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the hardware to clear MEM_INIT (up to 10 * 2 us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit DMA address of page i into the host
		 * page table (low word carries the VALID bit).
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request to be consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2538
/* Zero out the on-chip context memory for all 96 connection IDs on
 * 5706/5708-class chips (the 5709 keeps context in host memory instead;
 * see bnx2_init_5709_context()).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 quirk: virtual CIDs with bit 3 set map to
			 * a remapped physical CID.  NOTE(review): the remap
			 * formula is taken as-is; presumably per the 5706 A0
			 * errata — confirm against the chip documentation. */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			/* Map the physical context page in, then clear it. */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2581
/* Work around bad on-chip rx buffer memory: drain the firmware's free
 * mbuf pool, remembering the good mbufs (bit 9 of the allocated value
 * clear), then free only the good ones back.  The bad blocks remain
 * allocated forever and are therefore never handed to the rx engine.
 *
 * Returns 0 on success or -ENOMEM if the temporary array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* NOTE(review): assumes the free pool holds at most 512 mbufs so
	 * good_mbuf[] cannot overflow — confirm against the chip spec. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	/* Enable the rx mbuf unit so allocation requests are serviced. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the free request in the format the firmware
		 * expects (value duplicated into the high bits, bit 0 set). */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2633
2634static void
2635bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2636{
2637        u32 val;
2638
2639        val = (mac_addr[0] << 8) | mac_addr[1];
2640
2641        REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2642
2643        val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2644                (mac_addr[4] << 8) | mac_addr[5];
2645
2646        REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2647}
2648
/* Allocate a page for slot 'index' of the rx page ring, DMA-map it, and
 * point the corresponding rx BD at the mapping.
 *
 * Returns 0 on success, -ENOMEM if no page is available, or -EIO if the
 * DMA mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	/* GFP_ATOMIC: may be called from the rx softirq path. */
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	/* Record the page and mapping, and give the 64-bit DMA address
	 * to the hardware via the BD. */
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2673
2674static void
2675bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2676{
2677        struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2678        struct page *page = rx_pg->page;
2679
2680        if (!page)
2681                return;
2682
2683        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2684                       PCI_DMA_FROMDEVICE);
2685
2686        __free_page(page);
2687        rx_pg->page = NULL;
2688}
2689
/* Allocate and DMA-map a receive skb for rx ring slot 'index', point the
 * rx BD at the mapping, and account for the buffer in rx_prod_bseq.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails, or -EIO if
 * the DMA mapping fails (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Hand the 64-bit DMA address to the hardware via the BD. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* The producer byte-sequence counter tracks total buffer bytes. */
	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2724
2725static int
2726bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2727{
2728        struct status_block *sblk = bnapi->status_blk.msi;
2729        u32 new_link_state, old_link_state;
2730        int is_set = 1;
2731
2732        new_link_state = sblk->status_attn_bits & event;
2733        old_link_state = sblk->status_attn_bits_ack & event;
2734        if (new_link_state != old_link_state) {
2735                if (new_link_state)
2736                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2737                else
2738                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2739        } else
2740                is_set = 0;
2741
2742        return is_set;
2743}
2744
/* Handle PHY-related attention events under the phy_lock: a link state
 * change triggers bnx2_set_link(); a timer-abort event triggers
 * bnx2_set_remote_link().
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2758
/* Read the hardware tx consumer index from the status block.  If the
 * index lands on the last entry of a ring page (the next-page BD, which
 * the hardware never stops on), advance past it.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2772
/* Reclaim completed tx descriptors for this NAPI instance's tx ring:
 * unmap and free the skbs of up to 'budget' completed packets, then wake
 * the tx queue if it was stopped and enough descriptors are now free.
 *
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnx2_napi instance services the tx queue of the same index. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's last BD; if it wraps past
			 * the end of a ring page, account for the extra
			 * next-page BD. */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the hardware has not yet consumed all BDs
			 * of this packet (signed compare handles the 16-bit
			 * index wrap-around). */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Skip the fragment BDs ... */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		/* ... plus the first BD of the packet. */
		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons to pick up completions that arrived
		 * while we were working. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing with bnx2_start_xmit()
	 * stopping the queue concurrently. */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2855
/* Recycle 'count' entries of the rx page ring from the consumer side back
 * to the producer side without allocating new pages.  If 'skb' is
 * non-NULL, the caller failed to replace the last page in the skb's frags
 * array: that page is taken back from the skb into the ring and the skb
 * is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last frag's page from the skb ... */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		/* ... and put it back into the consumer slot. */
		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page, its DMA mapping, and the BD address from
		 * the consumer slot to the producer slot (no-op when they
		 * are the same slot). */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2911
/* Recycle an rx skb: hand the buffer at ring slot 'cons' back to the
 * hardware at slot 'prod' without re-allocating.  Used on error paths
 * and after a small packet has been copied out of the buffer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the (partially CPU-synced) buffer back to the device;
	 * only the header area was synced for the CPU in bnx2_rx_int(). */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the mapping and BD address are already correct. */
	if (cons == prod)
		return;

	/* Move the DMA mapping and BD address to the producer slot. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2941
/* Finish building a received skb.  'len' is the packet length (including
 * the 4-byte CRC), 'hdr_len' is nonzero when the packet is split between
 * the linear buffer and the page ring, and 'ring_idx' packs the consumer
 * index (high 16 bits) and producer index (low 16 bits).
 *
 * A replacement rx skb is allocated first; on failure the old buffer (and
 * any page-ring entries the packet used) are recycled and an error is
 * returned.  For split packets, the payload pages are attached to the skb
 * as frags, each consumed page being replaced by a freshly allocated one.
 *
 * Returns 0 on success or a negative errno on allocation failure.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* No replacement buffer: recycle the old one ... */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* ... and the page-ring entries it would have used.
			 * raw_len re-adds the 4-byte CRC stripped by the
			 * caller. */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Whole packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Payload beyond the header lives in the page ring; +4
		 * accounts for the trailing CRC. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The last page holds only (part of) the CRC;
				 * trim the already-attached data instead of
				 * adding a frag, and recycle the remaining
				 * pages. */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;	/* strip the CRC */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Allocation failed: restore ring indices and
				 * recycle everything including the frag just
				 * attached (the skb is freed inside). */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3040
/* Read the hardware rx consumer index from the status block.  If the
 * index lands on the last entry of a ring page (the next-page BD, which
 * the hardware never stops on), advance past it.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3054
/* Process received packets for this NAPI instance's rx ring, up to
 * 'budget' packets.  Small packets are copied into a fresh skb and the
 * original buffer is recycled; larger packets are handed off to
 * bnx2_rx_skb() which re-fills the ring.  Handles VLAN tag extraction,
 * rx checksum offload, and error frames.  Ring indices are written back
 * to the hardware before returning.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for the CPU; the rest is
		 * synced implicitly by the unmap in bnx2_rx_skb() or given
		 * back to the device by bnx2_reuse_rx_skb(). */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr with length/status. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/payload split: header length is reported in
			 * the ip_xsum field for split frames. */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			/* Jumbo frame: payload continues in the page ring. */
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop error frames, recycling the buffer(s). */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy into a fresh skb and recycle
			 * the original rx buffer. */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group registered: re-insert the
				 * stripped tag into the packet by hand. */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames that are not VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results for TCP/UDP frames. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the hardware about the new producer indices. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	/* Order the index writes before any following MMIO. */
	mmiowb();

	return rx_pkt;

}
3230
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask the interrupt while NAPI polling is in progress. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3253
/* MSI ISR for one-shot mode: no explicit mask write is needed here,
 * unlike bnx2_msi() (the interrupt is re-armed when polling completes).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3270
/* INTx ISR.  Because the line may be shared, first check whether this
 * device actually asserted the interrupt; if so, mask and deassert it
 * and schedule NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index under napi_schedule_prep() so a poll
	 * already in flight is not scheduled twice. */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3309
3310static inline int
3311bnx2_has_fast_work(struct bnx2_napi *bnapi)
3312{
3313        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3314        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3315
3316        if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3317            (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3318                return 1;
3319        return 0;
3320}
3321
3322#define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3323                                 STATUS_ATTN_BITS_TIMER_ABORT)
3324
3325static inline int
3326bnx2_has_work(struct bnx2_napi *bnapi)
3327{
3328        struct status_block *sblk = bnapi->status_blk.msi;
3329
3330        if (bnx2_has_fast_work(bnapi))
3331                return 1;
3332
3333#ifdef BCM_CNIC
3334        if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3335                return 1;
3336#endif
3337
3338        if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3339            (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3340                return 1;
3341
3342        return 0;
3343}
3344
/* Workaround for lost MSI interrupts.  Called periodically: if work is
 * pending, MSI is enabled, and the status index has not advanced since
 * the previous check, toggle the MSI enable bit and invoke the MSI
 * handler by hand to unwedge the device.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* Idle since the last check with work pending: assume the
		 * MSI was lost; pulse the enable bit and service manually. */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember the index so the next invocation can detect idleness. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3366
#ifdef BCM_CNIC
/* Hand the status block to the registered CNIC driver's handler, if any,
 * so it can service its own completions.  Records the status index the
 * handler has seen in cnic_tag.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops is RCU-protected: it may be unregistered concurrently. */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3383
/* Handle slow-path link/timer attention events reported in the status
 * block.  An event is pending when an attention bit differs from the
 * corresponding ack bit.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back -- presumably to flush the posted write. */
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3403
3404static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3405                          int work_done, int budget)
3406{
3407        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3408        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3409
3410        if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3411                bnx2_tx_int(bp, bnapi, 0);
3412
3413        if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3414                work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3415
3416        return work_done;
3417}
3418
/* NAPI poll handler used for MSI-X vectors.  Only fast-path tx/rx work
 * is serviced here; link and CNIC events are handled by the vector-0
 * poll routine (bnx2_poll).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack the last processed status index for this
			 * vector via the INT_ACK_CMD register.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3445
/* NAPI poll handler for INTx/MSI (and MSI-X vector 0).  Handles link
 * attention, fast-path tx/rx, and CNIC dispatch, then acks the status
 * index when all work is done.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx path: ack twice, first with MASK_INT set,
			 * then without.  NOTE(review): presumably a chip
			 * requirement to avoid a spurious interrupt --
			 * confirm against the hardware manual.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3494
/* Program the chip's receive filtering: promiscuous / all-multi modes,
 * the multicast hash registers, and the unicast match registers.
 *
 * Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promisc/keep-vlan cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only while no vlan group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low byte of the little-endian CRC:
			 * bits 7:5 pick the register, bits 4:0 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for the match slots: go promisc. */
	if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		list_for_each_entry(ha, &dev->uc.list, list) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3590
3591static int __devinit
3592check_fw_section(const struct firmware *fw,
3593                 const struct bnx2_fw_file_section *section,
3594                 u32 alignment, bool non_empty)
3595{
3596        u32 offset = be32_to_cpu(section->offset);
3597        u32 len = be32_to_cpu(section->len);
3598
3599        if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3600                return -EINVAL;
3601        if ((non_empty && len == 0) || len > fw->size - offset ||
3602            len & (alignment - 1))
3603                return -EINVAL;
3604        return 0;
3605}
3606
3607static int __devinit
3608check_mips_fw_entry(const struct firmware *fw,
3609                    const struct bnx2_mips_fw_file_entry *entry)
3610{
3611        if (check_fw_section(fw, &entry->text, 4, true) ||
3612            check_fw_section(fw, &entry->data, 4, false) ||
3613            check_fw_section(fw, &entry->rodata, 4, false))
3614                return -EINVAL;
3615        return 0;
3616}
3617
/* Request and sanity-check the MIPS and RV2P firmware images for this
 * chip.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): when a later step fails, firmware loaded by an earlier
 * request_firmware() is not released here -- presumably the caller's
 * error path does so; verify against the probe/remove code.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick firmware files by chip; 5709 A0/A1 need a special RV2P. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
		       mips_fw_file);
		return rc;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
		       rv2p_fw_file);
		return rc;
	}
	/* Validate every section header before any of it is consumed. */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
		       mips_fw_file);
		return -EINVAL;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
		       rv2p_fw_file);
		return -EINVAL;
	}

	return 0;
}
3673
3674static u32
3675rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3676{
3677        switch (idx) {
3678        case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3679                rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3680                rv2p_code |= RV2P_BD_PAGE_SIZE;
3681                break;
3682        }
3683        return rv2p_code;
3684}
3685
/* Download one RV2P firmware image into the selected RV2P processor,
 * apply its fixup table, then reset the processor (it is un-stalled
 * later).  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Pick the address/command register pair for the processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Each instruction is 8 bytes: write high dword, low dword, then
	 * commit to instruction slot i/8 via the address/command register.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-write the instructions listed in the fixup table, patching
	 * the low dword through rv2p_fw_fixup() (e.g. page size).
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3745
3746static int
3747load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3748            const struct bnx2_mips_fw_file_entry *fw_entry)
3749{
3750        u32 addr, len, file_offset;
3751        __be32 *data;
3752        u32 offset;
3753        u32 val;
3754
3755        /* Halt the CPU. */
3756        val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3757        val |= cpu_reg->mode_value_halt;
3758        bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3759        bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3760
3761        /* Load the Text area. */
3762        addr = be32_to_cpu(fw_entry->text.addr);
3763        len = be32_to_cpu(fw_entry->text.len);
3764        file_offset = be32_to_cpu(fw_entry->text.offset);
3765        data = (__be32 *)(bp->mips_firmware->data + file_offset);
3766
3767        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3768        if (len) {
3769                int j;
3770
3771                for (j = 0; j < (len / 4); j++, offset += 4)
3772                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3773        }
3774
3775        /* Load the Data area. */
3776        addr = be32_to_cpu(fw_entry->data.addr);
3777        len = be32_to_cpu(fw_entry->data.len);
3778        file_offset = be32_to_cpu(fw_entry->data.offset);
3779        data = (__be32 *)(bp->mips_firmware->data + file_offset);
3780
3781        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3782        if (len) {
3783                int j;
3784
3785                for (j = 0; j < (len / 4); j++, offset += 4)
3786                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3787        }
3788
3789        /* Load the Read-Only area. */
3790        addr = be32_to_cpu(fw_entry->rodata.addr);
3791        len = be32_to_cpu(fw_entry->rodata.len);
3792        file_offset = be32_to_cpu(fw_entry->rodata.offset);
3793        data = (__be32 *)(bp->mips_firmware->data + file_offset);
3794
3795        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3796        if (len) {
3797                int j;
3798
3799                for (j = 0; j < (len / 4); j++, offset += 4)
3800                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3801        }
3802
3803        /* Clear the pre-fetch instruction. */
3804        bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3805
3806        val = be32_to_cpu(fw_entry->start_addr);
3807        bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3808
3809        /* Start the CPU. */
3810        val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3811        val &= ~cpu_reg->mode_value_halt;
3812        bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3813        bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3814
3815        return 0;
3816}
3817
3818static int
3819bnx2_init_cpus(struct bnx2 *bp)
3820{
3821        const struct bnx2_mips_fw_file *mips_fw =
3822                (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3823        const struct bnx2_rv2p_fw_file *rv2p_fw =
3824                (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3825        int rc;
3826
3827        /* Initialize the RV2P processor. */
3828        load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3829        load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3830
3831        /* Initialize the RX Processor. */
3832        rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3833        if (rc)
3834                goto init_cpu_err;
3835
3836        /* Initialize the TX Processor. */
3837        rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3838        if (rc)
3839                goto init_cpu_err;
3840
3841        /* Initialize the TX Patch-up Processor. */
3842        rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3843        if (rc)
3844                goto init_cpu_err;
3845
3846        /* Initialize the Completion Processor. */
3847        rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3848        if (rc)
3849                goto init_cpu_err;
3850
3851        /* Initialize the Command Processor. */
3852        rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3853
3854init_cpu_err:
3855        return rc;
3856}
3857
/* Move the chip between PCI power states.
 *
 * PCI_D0: wake the chip (delaying when leaving D3hot) and disable the
 * magic-packet / ACPI wake receive modes.
 * PCI_D3hot: optionally arm Wake-on-LAN (forcing 10/100 autoneg on
 * copper first), notify firmware, then write the D3hot state.
 * Returns 0 on success, -EINVAL for any other target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power state field and the PME status bit. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any pending wake events and disable magic packets. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so
			 * less power is drawn while waiting for wake-up.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			/* Restore the user-configured settings. */
			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Accept broadcast and multicast while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell firmware whether WOL is armed for this suspend. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot when WOL is armed; all other
		 * chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3995
3996static int
3997bnx2_acquire_nvram_lock(struct bnx2 *bp)
3998{
3999        u32 val;
4000        int j;
4001
4002        /* Request access to the flash interface. */
4003        REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4004        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4005                val = REG_RD(bp, BNX2_NVM_SW_ARB);
4006                if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4007                        break;
4008
4009                udelay(5);
4010        }
4011
4012        if (j >= NVRAM_TIMEOUT_COUNT)
4013                return -EBUSY;
4014
4015        return 0;
4016}
4017
4018static int
4019bnx2_release_nvram_lock(struct bnx2 *bp)
4020{
4021        int j;
4022        u32 val;
4023
4024        /* Relinquish nvram interface. */
4025        REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4026
4027        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4028                val = REG_RD(bp, BNX2_NVM_SW_ARB);
4029                if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4030                        break;
4031
4032                udelay(5);
4033        }
4034
4035        if (j >= NVRAM_TIMEOUT_COUNT)
4036                return -EBUSY;
4037
4038        return 0;
4039}
4040
4041
4042static int
4043bnx2_enable_nvram_write(struct bnx2 *bp)
4044{
4045        u32 val;
4046
4047        val = REG_RD(bp, BNX2_MISC_CFG);
4048        REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4049
4050        if (bp->flash_info->flags & BNX2_NV_WREN) {
4051                int j;
4052
4053                REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4054                REG_WR(bp, BNX2_NVM_COMMAND,
4055                       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4056
4057                for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4058                        udelay(5);
4059
4060                        val = REG_RD(bp, BNX2_NVM_COMMAND);
4061                        if (val & BNX2_NVM_COMMAND_DONE)
4062                                break;
4063                }
4064
4065                if (j >= NVRAM_TIMEOUT_COUNT)
4066                        return -EBUSY;
4067        }
4068        return 0;
4069}
4070
4071static void
4072bnx2_disable_nvram_write(struct bnx2 *bp)
4073{
4074        u32 val;
4075
4076        val = REG_RD(bp, BNX2_MISC_CFG);
4077        REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4078}
4079
4080
4081static void
4082bnx2_enable_nvram_access(struct bnx2 *bp)
4083{
4084        u32 val;
4085
4086        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4087        /* Enable both bits, even on read. */
4088        REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4089               val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4090}
4091
4092static void
4093bnx2_disable_nvram_access(struct bnx2 *bp)
4094{
4095        u32 val;
4096
4097        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4098        /* Disable both bits, even after read. */
4099        REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4100                val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4101                        BNX2_NVM_ACCESS_ENABLE_WR_EN));
4102}
4103
4104static int
4105bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4106{
4107        u32 cmd;
4108        int j;
4109
4110        if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4111                /* Buffered flash, no erase needed */
4112                return 0;
4113
4114        /* Build an erase command */
4115        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4116              BNX2_NVM_COMMAND_DOIT;
4117
4118        /* Need to clear DONE bit separately. */
4119        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4120
4121        /* Address of the NVRAM to read from. */
4122        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4123
4124        /* Issue an erase command. */
4125        REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4126
4127        /* Wait for completion. */
4128        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4129                u32 val;
4130
4131                udelay(5);
4132
4133                val = REG_RD(bp, BNX2_NVM_COMMAND);
4134                if (val & BNX2_NVM_COMMAND_DONE)
4135                        break;
4136        }
4137
4138        if (j >= NVRAM_TIMEOUT_COUNT)
4139                return -EBUSY;
4140
4141        return 0;
4142}
4143
/* Read one 32-bit word from NVRAM at @offset into @ret_val, stored in
 * big-endian byte order.  @cmd_flags carries extra NVM command bits
 * (presumably FIRST/LAST framing for multi-word accesses -- TODO
 * confirm).  Returns 0 on success, -EBUSY on command timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Copy out the word in big-endian byte order. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4187
4188
/* Write one 32-bit word (@val, big-endian byte order) to NVRAM at
 * @offset.  @cmd_flags carries extra NVM command bits (presumably
 * FIRST/LAST framing for multi-word accesses -- TODO confirm).
 * Returns 0 on success, -EBUSY on command timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4232
/* Identify the attached flash/EEPROM part, record its parameters in
 * bp->flash_info, and determine the usable flash size.  On pre-5709
 * chips the part is matched against flash_table using the strapping
 * bits read from NVM_CFG1; an unrecognized part fails with -ENODEV.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* 5709 always uses the one known flash spec; no table lookup. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Already reconfigured: match config1's backup
			 * strap bits directly. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop matched an entry: unknown part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVRAM size reported in shared hw config; fall back
	 * to the table's total_size when the reported size is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4315
4316static int
4317bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4318                int buf_size)
4319{
4320        int rc = 0;
4321        u32 cmd_flags, offset32, len32, extra;
4322
4323        if (buf_size == 0)
4324                return 0;
4325
4326        /* Request access to the flash interface. */
4327        if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4328                return rc;
4329
4330        /* Enable access to flash interface */
4331        bnx2_enable_nvram_access(bp);
4332
4333        len32 = buf_size;
4334        offset32 = offset;
4335        extra = 0;
4336
4337        cmd_flags = 0;
4338
4339        if (offset32 & 3) {
4340                u8 buf[4];
4341                u32 pre_len;
4342
4343                offset32 &= ~3;
4344                pre_len = 4 - (offset & 3);
4345
4346                if (pre_len >= len32) {
4347                        pre_len = len32;
4348                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
4349                                    BNX2_NVM_COMMAND_LAST;
4350                }
4351                else {
4352                        cmd_flags = BNX2_NVM_COMMAND_FIRST;
4353                }
4354
4355                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4356
4357                if (rc)
4358                        return rc;
4359
4360                memcpy(ret_buf, buf + (offset & 3), pre_len);
4361
4362                offset32 += 4;
4363                ret_buf += pre_len;
4364                len32 -= pre_len;
4365        }
4366        if (len32 & 3) {
4367                extra = 4 - (len32 & 3);
4368                len32 = (len32 + 4) & ~3;
4369        }
4370
4371        if (len32 == 4) {
4372                u8 buf[4];
4373
4374                if (cmd_flags)
4375                        cmd_flags = BNX2_NVM_COMMAND_LAST;
4376                else
4377                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
4378                                    BNX2_NVM_COMMAND_LAST;
4379
4380                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4381
4382                memcpy(ret_buf, buf, 4 - extra);
4383        }
4384        else if (len32 > 0) {
4385                u8 buf[4];
4386
4387                /* Read the first word. */
4388                if (cmd_flags)
4389                        cmd_flags = 0;
4390                else
4391                        cmd_flags = BNX2_NVM_COMMAND_FIRST;
4392
4393                rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4394
4395                /* Advance to the next dword. */
4396                offset32 += 4;
4397                ret_buf += 4;
4398                len32 -= 4;
4399
4400                while (len32 > 4 && rc == 0) {
4401                        rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4402
4403                        /* Advance to the next dword. */
4404                        offset32 += 4;
4405                        ret_buf += 4;
4406                        len32 -= 4;
4407                }
4408
4409                if (rc)
4410                        return rc;
4411
4412                cmd_flags = BNX2_NVM_COMMAND_LAST;
4413                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4414
4415                memcpy(ret_buf, buf, 4 - extra);
4416        }
4417
4418        /* Disable access to flash interface */
4419        bnx2_disable_nvram_access(bp);
4420
4421        bnx2_release_nvram_lock(bp);
4422
4423        return rc;
4424}
4425
/* Write @buf_size bytes from @data_buf to NVRAM at byte offset @offset.
 * Unaligned start/end offsets are handled by reading the bordering
 * dwords first and merging them with the new data in a temporary
 * buffer.  For non-buffered flash, each touched page is read into a
 * scratch buffer, erased, and rewritten in full.  Returns 0 on success
 * or a negative errno.
 *
 * NOTE(review): error paths taken from inside the per-page loop
 * (after bnx2_acquire_nvram_lock()) jump to nvram_write_end without
 * releasing NVRAM access or the hardware lock -- verify whether a
 * release is required on those paths.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: round down and preserve the leading bytes of
	 * the first dword in 'start'. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: round up and preserve the trailing bytes of the
	 * last dword in 'end'. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge preserved border bytes with the caller's data into one
	 * dword-aligned scratch buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a whole-page scratch buffer for the
	 * read/erase/rewrite cycle.
	 * NOTE(review): 264 presumably covers the largest page_size of
	 * the supported non-buffered parts -- confirm against flash_table. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Write one flash page per iteration until all bytes are out. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			/* Restore the page's original leading bytes. */
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4605
/* Query the bootcode's capability mailbox and negotiate optional
 * features: keeping VLAN tags (under ASF), and remote PHY control for
 * SerDes ports.  Accepted capabilities are acknowledged back to the
 * firmware via BNX2_DRV_ACK_CAP_MB while the interface is running.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	/* Start from a clean slate; capabilities are re-derived below. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF firmware the driver may always keep VLAN tags. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	/* Bail out unless the firmware advertises a capability block. */
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	/* Remote PHY is only meaningful on SerDes-based ports. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Derive the reported port type from the link status. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	/* Acknowledge the accepted capabilities while the netdev is up. */
	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4645
/* Map the MSI-X vector table and PBA through the spare GRC windows so
 * they are reachable via the register BAR.  The window mode must be
 * switched to separate-window mode before programming the windows. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4654
/* Perform a coordinated soft reset of the chip.  @reset_code is passed
 * to the bootcode so it knows why the reset is happening; the function
 * waits for firmware permission before resetting and for firmware
 * re-initialization afterwards.  Also re-reads firmware capabilities
 * and applies the 5706 A0 voltage/rbuf errata workarounds.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register; the config
		 * write below re-enables register window + word swap. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities under the PHY lock; if the port
	 * type changed, reprogram the default remote link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* Restore the MSI-X window mapping after the reset. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4760
/* Bring the chip from post-reset state to fully operational: program
 * DMA/byte-swap configuration, contexts, CPUs/firmware, MAC address,
 * MTU, status/statistics block DMA addresses, and host-coalescing
 * parameters, then release the firmware from reset and enable the
 * remaining blocks.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA engine configuration: data/control byte and word swap
	 * (control byte-swap only on big-endian hosts) plus the number
	 * of read/write DMA channels. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 erratum: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the enable-relaxed-ordering bit. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load and start the on-chip CPUs' firmware. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Set the kernel bypass window to cover all kernel contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds are sized for at least a 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the status block and per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Program the DMA addresses of the status and stats blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing trip points and tick timers; each register
	 * packs the interrupt-mode value in the upper 16 bits. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 erratum: timer-mode coalescing cannot be used. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-MSI-X-vector status block configuration (vectors 1..n). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware init is done and enable the remaining blocks. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4985
4986static void
4987bnx2_clear_ring_states(struct bnx2 *bp)
4988{
4989        struct bnx2_napi *bnapi;
4990        struct bnx2_tx_ring_info *txr;
4991        struct bnx2_rx_ring_info *rxr;
4992        int i;
4993
4994        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4995                bnapi = &bp->bnx2_napi[i];
4996                txr = &bnapi->tx_ring;
4997                rxr = &bnapi->rx_ring;
4998
4999                txr->tx_cons = 0;
5000                txr->hw_tx_cons = 0;
5001                rxr->rx_prod_bseq = 0;
5002                rxr->rx_prod = 0;
5003                rxr->rx_cons = 0;
5004                rxr->rx_pg_prod = 0;
5005                rxr->rx_pg_cons = 0;
5006        }
5007}
5008
5009static void
5010bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5011{
5012        u32 val, offset0, offset1, offset2, offset3;
5013        u32 cid_addr = GET_CID_ADDR(cid);
5014
5015        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5016                offset0 = BNX2_L2CTX_TYPE_XI;
5017                offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5018                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5019                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5020        } else {
5021                offset0 = BNX2_L2CTX_TYPE;
5022                offset1 = BNX2_L2CTX_CMD_TYPE;
5023                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5024                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5025        }
5026        val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5027        bnx2_ctx_wr(bp, cid_addr, offset0, val);
5028
5029        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5030        bnx2_ctx_wr(bp, cid_addr, offset1, val);
5031
5032        val = (u64) txr->tx_desc_mapping >> 32;
5033        bnx2_ctx_wr(bp, cid_addr, offset2, val);
5034
5035        val = (u64) txr->tx_desc_mapping & 0xffffffff;
5036        bnx2_ctx_wr(bp, cid_addr, offset3, val);
5037}
5038
5039static void
5040bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5041{
5042        struct tx_bd *txbd;
5043        u32 cid = TX_CID;
5044        struct bnx2_napi *bnapi;
5045        struct bnx2_tx_ring_info *txr;
5046
5047        bnapi = &bp->bnx2_napi[ring_num];
5048        txr = &bnapi->tx_ring;
5049
5050        if (ring_num == 0)
5051                cid = TX_CID;
5052        else
5053                cid = TX_TSS_CID + ring_num - 1;
5054
5055        bp->tx_wake_thresh = bp->tx_ring_size / 2;
5056
5057        txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5058
5059        txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5060        txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5061
5062        txr->tx_prod = 0;
5063        txr->tx_prod_bseq = 0;
5064
5065        txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5066        txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5067
5068        bnx2_init_tx_context(bp, cid, txr);
5069}
5070
5071static void
5072bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5073                     int num_rings)
5074{
5075        int i;
5076        struct rx_bd *rxbd;
5077
5078        for (i = 0; i < num_rings; i++) {
5079                int j;
5080
5081                rxbd = &rx_ring[i][0];
5082                for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5083                        rxbd->rx_bd_len = buf_size;
5084                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5085                }
5086                if (i == (num_rings - 1))
5087                        j = 0;
5088                else
5089                        j = i + 1;
5090                rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5091                rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5092        }
5093}
5094
/* Initialize RX ring @ring_num: chain the descriptor pages, program the
 * chip's RX context (including the optional jumbo page ring), pre-fill
 * the rings with buffers, and publish the initial producer indices to
 * the hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the default RX cid; extra rings use RSS cids. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Link the RX descriptor pages into a circular chain. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page (jumbo) ring disabled by default; enabled below when
	 * rx_pg_ring_size is non-zero. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Base address of the page descriptor ring. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Base address of the normal buffer descriptor ring. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop quietly if allocation fails. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the skb ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Cache the mailbox addresses used by the fast path ... */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* ... and hand the initial producer indices to the chip. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5174
/* (Re)initialize all TX and RX rings and, when multiple rings are in
 * use, program the TSS configuration and the RSS indirection table.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are being set up. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Likewise disable RSS while the RX rings are being set up. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* The indirection table spreads flows round-robin over
		 * the non-default RX rings.  Entries are one byte each,
		 * packed four to a word and written big-endian so byte 0
		 * of each group lands in the most significant position.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5219
5220static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5221{
5222        u32 max, num_rings = 1;
5223
5224        while (ring_size > MAX_RX_DESC_CNT) {
5225                ring_size -= MAX_RX_DESC_CNT;
5226                num_rings++;
5227        }
5228        /* round to next power of 2 */
5229        max = max_size;
5230        while ((max & num_rings) == 0)
5231                max >>= 1;
5232
5233        if (num_rings != max)
5234                max <<= 1;
5235
5236        return max;
5237}
5238
/* Compute all RX buffer/ring sizing derived from the current MTU and
 * the requested ring size @size: the per-buffer sizes, the copybreak
 * threshold, and (when the MTU is too large for a single page) the
 * jumbo page-ring parameters.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint including alignment and shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Frame does not fit in one page: split the payload onto
		 * page-sized buffers and keep only the header portion in
		 * the skb buffer. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5277
/* Free every skb still queued on any TX ring, unmapping its DMA
 * buffers first.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring not allocated; nothing to free on it. */
		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;

			if (skb == NULL) {
				j++;
				continue;
			}

			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

			tx_buf->skb = NULL;

			/* An skb occupies one BD for the linear part plus
			 * one per fragment; skip past all of them. */
			j += skb_shinfo(skb)->nr_frags + 1;
			dev_kfree_skb(skb);
		}
	}
}
5309
/* Free every skb and page buffer still posted on any RX ring,
 * unmapping the DMA buffers first.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): returns (not continue) on the first ring
		 * without buffers, unlike bnx2_free_tx_skbs.  Presumably
		 * rings are allocated in order so later ones are also
		 * unallocated — verify against the alloc path. */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release the jumbo page-ring buffers as well. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5343
/* Free all buffers posted on the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5350
/* Reset the chip with @reset_code and bring the rings back up.
 * Returns 0 on success or a negative errno from the chip reset/init.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	/* Free posted buffers even if the reset failed, then report
	 * the reset error. */
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_all_rings(bp);
	return 0;
}
5367
/* Full NIC (re)initialization: chip reset + ring setup, then PHY init
 * and link setup under the PHY lock.  @reset_phy is passed through to
 * bnx2_init_phy().  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	/* PHY accesses are serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5384
5385static int
5386bnx2_shutdown_chip(struct bnx2 *bp)
5387{
5388        u32 reset_code;
5389
5390        if (bp->flags & BNX2_FLAG_NO_WOL)
5391                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5392        else if (bp->wol)
5393                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5394        else
5395                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5396
5397        return bnx2_reset_chip(bp, reset_code);
5398}
5399
/* Ethtool register self-test.  For each entry in reg_tbl, verify that
 * the read-write bits (rw_mask) can be cleared and set by writing 0 and
 * 0xffffffff, and that the read-only bits (ro_mask) are unaffected by
 * those writes.  The original register value is restored in all cases.
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Entries flagged NOT_5709 only exist on older chips. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Writing 0 must clear all RW bits and leave RO bits. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Writing all-ones must set all RW bits, RO unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before failing. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5570
5571static int
5572bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5573{
5574        static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5575                0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5576        int i;
5577
5578        for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5579                u32 offset;
5580
5581                for (offset = 0; offset < size; offset += 4) {
5582
5583                        bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5584
5585                        if (bnx2_reg_rd_ind(bp, start + offset) !=
5586                                test_pattern[i]) {
5587                                return -ENODEV;
5588                        }
5589                }
5590        }
5591        return 0;
5592}
5593
5594static int
5595bnx2_test_memory(struct bnx2 *bp)
5596{
5597        int ret = 0;
5598        int i;
5599        static struct mem_entry {
5600                u32   offset;
5601                u32   len;
5602        } mem_tbl_5706[] = {
5603                { 0x60000,  0x4000 },
5604                { 0xa0000,  0x3000 },
5605                { 0xe0000,  0x4000 },
5606                { 0x120000, 0x4000 },
5607                { 0x1a0000, 0x4000 },
5608                { 0x160000, 0x4000 },
5609                { 0xffffffff, 0    },
5610        },
5611        mem_tbl_5709[] = {
5612                { 0x60000,  0x4000 },
5613                { 0xa0000,  0x3000 },
5614                { 0xe0000,  0x4000 },
5615                { 0x120000, 0x4000 },
5616                { 0x1a0000, 0x4000 },
5617                { 0xffffffff, 0    },
5618        };
5619        struct mem_entry *mem_tbl;
5620
5621        if (CHIP_NUM(bp) == CHIP_NUM_5709)
5622                mem_tbl = mem_tbl_5709;
5623        else
5624                mem_tbl = mem_tbl_5706;
5625
5626        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5627                if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5628                        mem_tbl[i].len)) != 0) {
5629                        return ret;
5630                }
5631        }
5632
5633        return ret;
5634}
5635
5636#define BNX2_MAC_LOOPBACK       0
5637#define BNX2_PHY_LOOPBACK       1
5638
/* Run one loopback self-test in @loopback_mode (BNX2_MAC_LOOPBACK or
 * BNX2_PHY_LOOPBACK): build a test frame addressed to ourselves, send
 * it on TX ring 0, and verify that exactly one error-free frame with
 * the same length and payload arrives on RX ring 0.  Returns 0 on
 * success, -EINVAL for an unknown mode, -ENOMEM/-EIO on setup failure,
 * or -ENODEV when the looped-back frame does not match.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always uses vector 0's TX and RX rings. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback is skipped (reported as pass) when the
		 * PHY is managed remotely. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our MAC as destination, zeroed type
	 * field, then a counting byte pattern as payload. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_head;

	/* Force a status block update to capture the RX consumer index
	 * before the frame is sent. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single TX descriptor for the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up the TX/RX
	 * completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been consumed by the chip ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr sits at the start of the buffer, before the
	 * BNX2_RX_OFFSET reserved area. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject any frame the chip flagged with receive errors. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match (minus 4 bytes of CRC). */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Payload must match the pattern written above. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5769
5770#define BNX2_MAC_LOOPBACK_FAILED        1
5771#define BNX2_PHY_LOOPBACK_FAILED        2
5772#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5773                                         BNX2_PHY_LOOPBACK_FAILED)
5774
5775static int
5776bnx2_test_loopback(struct bnx2 *bp)
5777{
5778        int rc = 0;
5779
5780        if (!netif_running(bp->dev))
5781                return BNX2_LOOPBACK_FAILED;
5782
5783        bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5784        spin_lock_bh(&bp->phy_lock);
5785        bnx2_init_phy(bp, 1);
5786        spin_unlock_bh(&bp->phy_lock);
5787        if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5788                rc |= BNX2_MAC_LOOPBACK_FAILED;
5789        if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5790                rc |= BNX2_PHY_LOOPBACK_FAILED;
5791        return rc;
5792}
5793
5794#define NVRAM_SIZE 0x200
5795#define CRC32_RESIDUAL 0xdebb20e3
5796
5797static int
5798bnx2_test_nvram(struct bnx2 *bp)
5799{
5800        __be32 buf[NVRAM_SIZE / 4];
5801        u8 *data = (u8 *) buf;
5802        int rc = 0;
5803        u32 magic, csum;
5804
5805        if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5806                goto test_nvram_done;
5807
5808        magic = be32_to_cpu(buf[0]);
5809        if (magic != 0x669955aa) {
5810                rc = -ENODEV;
5811                goto test_nvram_done;
5812        }
5813
5814        if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5815                goto test_nvram_done;
5816
5817        csum = ether_crc_le(0x100, data);
5818        if (csum != CRC32_RESIDUAL) {
5819                rc = -ENODEV;
5820                goto test_nvram_done;
5821        }
5822
5823        csum = ether_crc_le(0x100, data + 0x100);
5824        if (csum != CRC32_RESIDUAL) {
5825                rc = -ENODEV;
5826        }
5827
5828test_nvram_done:
5829        return rc;
5830}
5831
/* Ethtool link self-test: returns 0 when link is up, -ENODEV when the
 * interface is down or no link is detected.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* With a remotely managed PHY, just report the cached state. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* Read twice; presumably because the link-status bit is latched
	 * so only the second read reflects the current state — confirm
	 * against the PHY datasheet. */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
5857
5858static int
5859bnx2_test_intr(struct bnx2 *bp)
5860{
5861        int i;
5862        u16 status_idx;
5863
5864        if (!netif_running(bp->dev))
5865                return -ENODEV;
5866
5867        status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5868
5869        /* This register is not touched during run-time. */
5870        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5871        REG_RD(bp, BNX2_HC_COMMAND);
5872
5873        for (i = 0; i < 10; i++) {
5874                if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5875                        status_idx) {
5876
5877                        break;
5878                }
5879
5880                msleep_interruptible(10);
5881        }
5882        if (i < 10)
5883                return 0;
5884
5885        return -ENODEV;
5886}
5887
/* Determining link for parallel detection. */
/* Returns 1 when the 5706 SerDes PHY sees a usable link partner signal
 * (signal detected, AN debug clean, not receiving CONFIG words), 0
 * otherwise or when parallel detection is disabled.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read it. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read AN_DBG twice; presumably the status bits are latched so
	 * the second read gives the current value — confirm against the
	 * PHY documentation. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5919
/* Per-tick SerDes maintenance for the 5706: forces 1000/full when a
 * non-autonegotiating partner is parallel-detected, reverts to autoneg
 * when the partner starts negotiating, and forces the link down when
 * the PHY loses sync while the link is reported up.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* An autoneg attempt is still in flight; just count down
                 * and skip the link check this tick.
                 */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = BNX2_TIMER_INTERVAL;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg is on but no link: if the partner looks
                         * non-autonegotiating, force 1000/full (parallel
                         * detect) and remember we did so.
                         */
                        if (bnx2_5706_serdes_has_link(bp)) {
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Link is up via parallel detect.  Registers 0x17/0x15 are
                 * presumably partner-status access -- TODO confirm; bit
                 * 0x20 set means re-enable autoneg on our side.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        if (check_link) {
                u32 val;

                /* Double read of AN_DBG -- first read presumably returns
                 * latched status; TODO confirm with PHY documentation.
                 */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        /* Link reported up but PHY lost sync: force the
                         * link down once, then let bnx2_set_link() settle
                         * the state on subsequent ticks.
                         */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
5981
/* Per-tick SerDes maintenance for the 5708.  Only relevant for 2.5G
 * capable boards without a remote-PHY; alternates between forced 2.5G
 * and autoneg while the link is down.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        /* Remote PHY handles link management itself. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Autoneg attempt in flight; count down before toggling. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg failed to link: try forced 2.5G with a
                         * shorter timeout before re-checking.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced 2.5G failed too: go back to autoneg and
                         * give it two ticks before trying again.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = BNX2_TIMER_INTERVAL;
                }

        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        spin_unlock(&bp->phy_lock);
}
6014
/* Periodic driver timer.  While the device is running and interrupts
 * are not globally disabled (intr_sem), it checks for missed MSIs,
 * sends the firmware heartbeat, refreshes the firmware rx-drop counter,
 * applies the broken-statistics workaround, and drives the SerDes state
 * machines.  Always re-arms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* intr_sem != 0 means interrupts are disabled (e.g. during a
         * reset); skip the work but keep the timer alive.
         */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Missed-MSI check applies only to non-one-shot MSI mode. */
        if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
             BNX2_FLAG_USING_MSI)
                bnx2_chk_missed_msi(bp);

        bnx2_send_heart_beat(bp);

        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6050
6051static int
6052bnx2_request_irq(struct bnx2 *bp)
6053{
6054        unsigned long flags;
6055        struct bnx2_irq *irq;
6056        int rc = 0, i;
6057
6058        if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6059                flags = 0;
6060        else
6061                flags = IRQF_SHARED;
6062
6063        for (i = 0; i < bp->irq_nvecs; i++) {
6064                irq = &bp->irq_tbl[i];
6065                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6066                                 &bp->bnx2_napi[i]);
6067                if (rc)
6068                        break;
6069                irq->requested = 1;
6070        }
6071        return rc;
6072}
6073
6074static void
6075bnx2_free_irq(struct bnx2 *bp)
6076{
6077        struct bnx2_irq *irq;
6078        int i;
6079
6080        for (i = 0; i < bp->irq_nvecs; i++) {
6081                irq = &bp->irq_tbl[i];
6082                if (irq->requested)
6083                        free_irq(irq->vector, &bp->bnx2_napi[i]);
6084                irq->requested = 0;
6085        }
6086        if (bp->flags & BNX2_FLAG_USING_MSI)
6087                pci_disable_msi(bp->pdev);
6088        else if (bp->flags & BNX2_FLAG_USING_MSIX)
6089                pci_disable_msix(bp->pdev);
6090
6091        bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6092}
6093
/* Try to switch the device into MSI-X mode.  Programs the MSI-X table
 * windows, asks the PCI core for BNX2_MAX_MSIX_VEC vectors, and on
 * success records vectors, names, and the one-shot handler in
 * bp->irq_tbl.  On any failure it returns silently and the caller
 * falls back to MSI/INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
        int i, rc;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
        struct net_device *dev = bp->dev;
        const int len = sizeof(bp->irq_tbl[0].name);

        bnx2_setup_msix_tbl(bp);
        REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
        REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
        REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                msix_ent[i].entry = i;
                msix_ent[i].vector = 0;
        }

        /* All-or-nothing request; a partial grant (rc > 0) is treated the
         * same as failure and leaves the previous interrupt mode intact.
         */
        rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
        if (rc != 0)
                return;

        /* Only msix_vecs vectors are used, but the whole table is filled. */
        bp->irq_nvecs = msix_vecs;
        bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                bp->irq_tbl[i].vector = msix_ent[i].vector;
                snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
                bp->irq_tbl[i].handler = bnx2_msi_1shot;
        }
}
6124
/* Choose the interrupt mode (MSI-X, MSI, or legacy INTx) and size the
 * tx/rx ring counts to the number of vectors obtained.  @dis_msi forces
 * legacy INTx.  Defaults to a single INTx vector and only upgrades if
 * the hardware and system allow it.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        int cpus = num_online_cpus();
        int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

        /* Baseline: one legacy INTx vector. */
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        /* MSI-X only pays off with more than one CPU. */
        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
                bnx2_enable_msix(bp, msix_vecs);

        /* Fall back to MSI if MSI-X was not enabled. */
        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= BNX2_FLAG_USING_MSI;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                /* 5709 supports one-shot MSI. */
                                bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }

        /* Tx queue count must be a power of two for the hardware hashing. */
        bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
        bp->dev->real_num_tx_queues = bp->num_tx_rings;

        bp->num_rx_rings = bp->irq_nvecs;
}
6158
/* Called with rtnl_lock */
/* ndo_open handler: powers up the device, picks an interrupt mode,
 * allocates rings, requests IRQs, and initializes the NIC.  If MSI is
 * in use, it verifies that an MSI actually arrives and falls back to
 * INTx (re-initializing the NIC) when it does not.  On any failure the
 * partially acquired resources are released via the open_err path.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* Reconfigure with MSI disabled and redo the NIC
                         * init + IRQ request from scratch.
                         */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

        netif_tx_start_all_queues(dev);

        return 0;

open_err:
        /* Unwind everything acquired above; the free routines tolerate
         * partially initialized state.
         */
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        return rc;
}
6235
/* Workqueue handler scheduled from bnx2_tx_timeout(): stops the netif,
 * re-initializes the NIC, and restarts it.  Setting intr_sem to 1
 * before bnx2_netif_start() keeps interrupts gated until the restart
 * path re-enables them.
 * NOTE(review): the bnx2_init_nic() return value is ignored here; a
 * failed re-init is not reported to the stack.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        /* Device may have been closed between scheduling and execution. */
        if (!netif_running(bp->dev))
                return;

        bnx2_netif_stop(bp);

        bnx2_init_nic(bp, 1);

        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
}
6251
/* ndo_tx_timeout handler: defer the reset to the workqueue instead of
 * resetting in softirq context.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
6260
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration registration: stores the new vlan_group and, when
 * the device is up, reprograms the rx mode and notifies firmware
 * (KEEP_VLAN) around a netif stop/start cycle.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        if (netif_running(dev))
                bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;

        /* Device down: nothing else to program until it is opened. */
        if (!netif_running(dev))
                return;

        bnx2_set_rx_mode(dev);
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

        bnx2_netif_start(bp);
}
#endif
6283
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Main transmit entry point: selects the tx ring from the skb queue
 * mapping, DMA-maps the skb, builds one tx_bd per segment (with
 * checksum / VLAN / LSO flags), then kicks the hardware by writing the
 * new producer index and byte sequence.
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_tx_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;
        struct skb_shared_info *sp;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* Need one BD for the linear data plus one per page fragment. */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

#ifdef BCM_VLAN
        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
#endif
        /* gso_size != 0 means this is an LSO frame. */
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* TCPv6 LSO: encode the extra transport-header
                         * offset (beyond a bare IPv6 header) into the BD
                         * flag bits and the mss field.
                         */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        iph = ip_hdr(skb);
                        /* Report IP and TCP option lengths to the chip. */
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
                /* DMA mapping failed: drop the packet. */
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        sp = skb_shinfo(skb);
        mapping = sp->dma_head;

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;

        txbd = &txr->tx_desc_ring[ring_prod];

        /* First BD describes the linear part of the skb. */
        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;
        tx_buf->nr_frags = last_frag;
        tx_buf->is_gso = skb_is_gso(skb);

        /* One BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = sp->dma_maps[i];

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the last BD of the packet. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Ring the doorbell: new producer index and byte sequence. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;

        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);
                /* Re-check after stopping to close the race with
                 * bnx2_tx_int() freeing descriptors concurrently.
                 */
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
}
6424
/* Called with rtnl_lock */
/* ndo_stop handler: cancels pending reset work, quiesces interrupts and
 * NAPI, shuts the chip down, releases IRQs and memory, and drops the
 * device into D3hot.  The teardown order mirrors the setup order in
 * bnx2_open() in reverse.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure a queued reset_task cannot run after teardown. */
        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6445
/* Combine the _hi/_lo halves of a 64-bit hardware statistics counter
 * into an unsigned long.  The expansion is fully parenthesized so the
 * macro is safe inside larger expressions (the previous form,
 * "(hi << 32) + lo" without outer parentheses, would mis-associate when
 * followed by a higher-precedence operator).
 */
#define GET_NET_STATS64(ctr)                                    \
        (((unsigned long) ((unsigned long) (ctr##_hi) << 32) +  \
          (unsigned long) (ctr##_lo)))

/* On 32-bit, unsigned long can only hold the low half of the counter. */
#define GET_NET_STATS32(ctr)            \
        ((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
6458
/* ndo_get_stats handler: translates the hardware statistics block into
 * struct net_device_stats.  64-bit counters are folded via the
 * GET_NET_STATS macro (truncated to 32 bits on 32-bit kernels).
 * Returns the (possibly stale) dev->stats if the stats block has not
 * been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &dev->stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        /* "Overrsize" spelling matches the field name declared elsewhere. */
        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) (stats_blk->stat_IfInFTQDiscards +
                stats_blk->stat_IfInMBUFDiscards);

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* Carrier-sense counter is unreliable on 5706 and 5708 A0. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInFTQDiscards +
                stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);

        return net_stats;
}
6535
6536/* All ethtool functions called with rtnl_lock */
6537
/* ethtool get_settings handler.  Reports the supported/advertised link
 * modes based on the PHY type (remote-PHY boards report both serdes and
 * copper), plus the current autoneg and link state.  Speed/duplex are
 * only meaningful with carrier; -1 otherwise.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                /* Remote PHY can be switched between fibre and copper. */
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        /* phy_lock guards the live link fields read below. */
        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        }
        else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        if (netif_carrier_ok(dev)) {
                cmd->speed = bp->line_speed;
                cmd->duplex = bp->duplex;
        }
        else {
                /* No carrier: speed/duplex are unknown. */
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6596
/* ethtool set_settings handler.  Validates the requested port, autoneg,
 * advertised modes, and forced speed/duplex combinations, then stores
 * them in bp and (if the device is running) reprograms the PHY.  All
 * validation failures fall through to err_out_unlock with -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        /* Work on local copies; commit only after validation succeeds. */
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching ports requires remote-PHY capability. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 speeds are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        /* 1000 half duplex is not supported. */
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise everything the port can. */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex. */
                if (cmd->port == PORT_FIBRE) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        /* Gigabit+ cannot be forced on copper. */
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Validation passed: commit the new settings. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6691
6692static void
6693bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6694{
6695        struct bnx2 *bp = netdev_priv(dev);
6696
6697        strcpy(info->driver, DRV_MODULE_NAME);
6698        strcpy(info->version, DRV_MODULE_VERSION);
6699        strcpy(info->bus_info, pci_name(bp->pdev));
6700        strcpy(info->fw_version, bp->fw_version);
6701}
6702
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len handler: the dump has a fixed size. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6710
/* ethtool get_regs handler.  Dumps readable register windows into the
 * caller's 32KB buffer.  reg_boundaries[] lists [start, end) pairs of
 * register ranges that are safe to read; the walk reads a range, then
 * jumps to the next range's start, leaving the skipped gaps zeroed.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p, i, offset;
        u8 *orig_p = _p;
        struct bnx2 *bp = netdev_priv(dev);
        u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
                                 0x0800, 0x0880, 0x0c00, 0x0c10,
                                 0x0c30, 0x0d08, 0x1000, 0x101c,
                                 0x1040, 0x1048, 0x1080, 0x10a4,
                                 0x1400, 0x1490, 0x1498, 0x14f0,
                                 0x1500, 0x155c, 0x1580, 0x15dc,
                                 0x1600, 0x1658, 0x1680, 0x16d8,
                                 0x1800, 0x1820, 0x1840, 0x1854,
                                 0x1880, 0x1894, 0x1900, 0x1984,
                                 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
                                 0x1c80, 0x1c94, 0x1d00, 0x1d84,
                                 0x2000, 0x2030, 0x23c0, 0x2400,
                                 0x2800, 0x2820, 0x2830, 0x2850,
                                 0x2b40, 0x2c10, 0x2fc0, 0x3058,
                                 0x3c00, 0x3c94, 0x4000, 0x4010,
                                 0x4080, 0x4090, 0x43c0, 0x4458,
                                 0x4c00, 0x4c18, 0x4c40, 0x4c54,
                                 0x4fc0, 0x5010, 0x53c0, 0x5444,
                                 0x5c00, 0x5c18, 0x5c80, 0x5c90,
                                 0x5fc0, 0x6000, 0x6400, 0x6428,
                                 0x6800, 0x6848, 0x684c, 0x6860,
                                 0x6888, 0x6910, 0x8000 };

        regs->version = 0;

        /* Pre-zero so un-dumped gaps read as zero. */
        memset(p, 0, BNX2_REGDUMP_LEN);

        /* Device must be up (out of D3) for register reads to be valid. */
        if (!netif_running(bp->dev))
                return;

        i = 0;
        offset = reg_boundaries[0];
        p += offset;
        while (offset < BNX2_REGDUMP_LEN) {
                *p++ = REG_RD(bp, offset);
                offset += 4;
                /* End of this range: skip to the start of the next one. */
                if (offset == reg_boundaries[i + 1]) {
                        offset = reg_boundaries[i + 2];
                        p = (u32 *) (orig_p + offset);
                        i += 2;
                }
        }
}
6760
6761static void
6762bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6763{
6764        struct bnx2 *bp = netdev_priv(dev);
6765
6766        if (bp->flags & BNX2_FLAG_NO_WOL) {
6767                wol->supported = 0;
6768                wol->wolopts = 0;
6769        }
6770        else {
6771                wol->supported = WAKE_MAGIC;
6772                if (bp->wol)
6773                        wol->wolopts = WAKE_MAGIC;
6774                else
6775                        wol->wolopts = 0;
6776        }
6777        memset(&wol->sopass, 0, sizeof(wol->sopass));
6778}
6779
6780static int
6781bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6782{
6783        struct bnx2 *bp = netdev_priv(dev);
6784
6785        if (wol->wolopts & ~WAKE_MAGIC)
6786                return -EINVAL;
6787
6788        if (wol->wolopts & WAKE_MAGIC) {
6789                if (bp->flags & BNX2_FLAG_NO_WOL)
6790                        return -EINVAL;
6791
6792                bp->wol = 1;
6793        }
6794        else {
6795                bp->wol = 0;
6796        }
6797        return 0;
6798}
6799
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting autoneg only makes sense when speed autoneg is on. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY capable devices: let bnx2_setup_remote_phy()
	 * redo the link setup instead of touching MII registers here.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* phy_lock is dropped across the sleep and re-taken below. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout serviced by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6845
6846static u32
6847bnx2_get_link(struct net_device *dev)
6848{
6849        struct bnx2 *bp = netdev_priv(dev);
6850
6851        return bp->link_up;
6852}
6853
6854static int
6855bnx2_get_eeprom_len(struct net_device *dev)
6856{
6857        struct bnx2 *bp = netdev_priv(dev);
6858
6859        if (bp->flash_info == NULL)
6860                return 0;
6861
6862        return (int) bp->flash_size;
6863}
6864
6865static int
6866bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6867                u8 *eebuf)
6868{
6869        struct bnx2 *bp = netdev_priv(dev);
6870        int rc;
6871
6872        if (!netif_running(dev))
6873                return -EAGAIN;
6874
6875        /* parameters already validated in ethtool_get_eeprom */
6876
6877        rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6878
6879        return rc;
6880}
6881
6882static int
6883bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6884                u8 *eebuf)
6885{
6886        struct bnx2 *bp = netdev_priv(dev);
6887        int rc;
6888
6889        if (!netif_running(dev))
6890                return -EAGAIN;
6891
6892        /* parameters already validated in ethtool_set_eeprom */
6893
6894        rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6895
6896        return rc;
6897}
6898
6899static int
6900bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6901{
6902        struct bnx2 *bp = netdev_priv(dev);
6903
6904        memset(coal, 0, sizeof(struct ethtool_coalesce));
6905
6906        coal->rx_coalesce_usecs = bp->rx_ticks;
6907        coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6908        coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6909        coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6910
6911        coal->tx_coalesce_usecs = bp->tx_ticks;
6912        coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6913        coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6914        coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6915
6916        coal->stats_block_coalesce_usecs = bp->stats_ticks;
6917
6918        return 0;
6919}
6920
6921static int
6922bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6923{
6924        struct bnx2 *bp = netdev_priv(dev);
6925
6926        bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6927        if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6928
6929        bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6930        if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6931
6932        bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6933        if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6934
6935        bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6936        if (bp->rx_quick_cons_trip_int > 0xff)
6937                bp->rx_quick_cons_trip_int = 0xff;
6938
6939        bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6940        if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6941
6942        bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6943        if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6944
6945        bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6946        if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6947
6948        bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6949        if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6950                0xff;
6951
6952        bp->stats_ticks = coal->stats_block_coalesce_usecs;
6953        if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
6954                if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6955                        bp->stats_ticks = USEC_PER_SEC;
6956        }
6957        if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6958                bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6959        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6960
6961        if (netif_running(bp->dev)) {
6962                bnx2_netif_stop(bp);
6963                bnx2_init_nic(bp, 0);
6964                bnx2_netif_start(bp);
6965        }
6966
6967        return 0;
6968}
6969
6970static void
6971bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6972{
6973        struct bnx2 *bp = netdev_priv(dev);
6974
6975        ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6976        ering->rx_mini_max_pending = 0;
6977        ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6978
6979        ering->rx_pending = bp->rx_ring_size;
6980        ering->rx_mini_pending = 0;
6981        ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6982
6983        ering->tx_max_pending = MAX_TX_DESC_CNT;
6984        ering->tx_pending = bp->tx_ring_size;
6985}
6986
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	/* If the interface is up, quiesce the chip and release the old
	 * rings and buffers before applying the new sizes.
	 */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		/* On failure the device cannot run with the new sizes:
		 * re-enable NAPI so dev_close() can complete, then take
		 * the interface down.
		 */
		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
		bnx2_netif_start(bp);
	}
	return 0;
}
7016
7017static int
7018bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7019{
7020        struct bnx2 *bp = netdev_priv(dev);
7021        int rc;
7022
7023        if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7024                (ering->tx_pending > MAX_TX_DESC_CNT) ||
7025                (ering->tx_pending <= MAX_SKB_FRAGS)) {
7026
7027                return -EINVAL;
7028        }
7029        rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7030        return rc;
7031}
7032
7033static void
7034bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7035{
7036        struct bnx2 *bp = netdev_priv(dev);
7037
7038        epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7039        epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7040        epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7041}
7042
7043static int
7044bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7045{
7046        struct bnx2 *bp = netdev_priv(dev);
7047
7048        bp->req_flow_ctrl = 0;
7049        if (epause->rx_pause)
7050                bp->req_flow_ctrl |= FLOW_CTRL_RX;
7051        if (epause->tx_pause)
7052                bp->req_flow_ctrl |= FLOW_CTRL_TX;
7053
7054        if (epause->autoneg) {
7055                bp->autoneg |= AUTONEG_FLOW_CTRL;
7056        }
7057        else {
7058                bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7059        }
7060
7061        if (netif_running(dev)) {
7062                spin_lock_bh(&bp->phy_lock);
7063                bnx2_setup_phy(bp, bp->phy_port);
7064                spin_unlock_bh(&bp->phy_lock);
7065        }
7066
7067        return 0;
7068}
7069
7070static u32
7071bnx2_get_rx_csum(struct net_device *dev)
7072{
7073        struct bnx2 *bp = netdev_priv(dev);
7074
7075        return bp->rx_csum;
7076}
7077
7078static int
7079bnx2_set_rx_csum(struct net_device *dev, u32 data)
7080{
7081        struct bnx2 *bp = netdev_priv(dev);
7082
7083        bp->rx_csum = data;
7084        return 0;
7085}
7086
7087static int
7088bnx2_set_tso(struct net_device *dev, u32 data)
7089{
7090        struct bnx2 *bp = netdev_priv(dev);
7091
7092        if (data) {
7093                dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7094                if (CHIP_NUM(bp) == CHIP_NUM_5709)
7095                        dev->features |= NETIF_F_TSO6;
7096        } else
7097                dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7098                                   NETIF_F_TSO_ECN);
7099        return 0;
7100}
7101
/* ethtool statistics names, in the same order as the offset and
 * length tables that follow.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7153
/* Number of ethtool statistics; the offset and length tables below
 * must have exactly this many entries.  ARRAY_SIZE (linux/kernel.h)
 * replaces the equivalent hand-rolled sizeof division.
 */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Convert a statistics_block member offset (bytes) to a u32 word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7158
/* Word offset into struct statistics_block for each counter in
 * bnx2_stats_str_arr.  For 64-bit counters the entry names the _hi
 * word; bnx2_get_ethtool_stats() reads the adjacent low word too.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7208
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (0 = counter skipped) for the early
 * chip revisions selected in bnx2_get_ethtool_stats().
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7219
/* Per-counter width in bytes (0 = counter skipped) for all other
 * chip revisions.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7227
#define BNX2_NUM_TESTS 6

/* Self-test names reported to ethtool; the order matches the result
 * slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7240
7241static int
7242bnx2_get_sset_count(struct net_device *dev, int sset)
7243{
7244        switch (sset) {
7245        case ETH_SS_TEST:
7246                return BNX2_NUM_TESTS;
7247        case ETH_SS_STATS:
7248                return BNX2_NUM_STATS;
7249        default:
7250                return -EOPNOTSUPP;
7251        }
7252}
7253
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	/* buf[] result slots correspond to bnx2_tests_str_arr entries. */
	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive use of the chip, so stop
		 * traffic and put the chip into diagnostic mode first.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Return the chip to its normal operating state. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests run without disturbing normal operation. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7312
7313static void
7314bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7315{
7316        switch (stringset) {
7317        case ETH_SS_STATS:
7318                memcpy(buf, bnx2_stats_str_arr,
7319                        sizeof(bnx2_stats_str_arr));
7320                break;
7321        case ETH_SS_TEST:
7322                memcpy(buf, bnx2_tests_str_arr,
7323                        sizeof(bnx2_tests_str_arr));
7324                break;
7325        }
7326}
7327
7328static void
7329bnx2_get_ethtool_stats(struct net_device *dev,
7330                struct ethtool_stats *stats, u64 *buf)
7331{
7332        struct bnx2 *bp = netdev_priv(dev);
7333        int i;
7334        u32 *hw_stats = (u32 *) bp->stats_blk;
7335        u8 *stats_len_arr = NULL;
7336
7337        if (hw_stats == NULL) {
7338                memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7339                return;
7340        }
7341
7342        if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7343            (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7344            (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7345            (CHIP_ID(bp) == CHIP_ID_5708_A0))
7346                stats_len_arr = bnx2_5706_stats_len_arr;
7347        else
7348                stats_len_arr = bnx2_5708_stats_len_arr;
7349
7350        for (i = 0; i < BNX2_NUM_STATS; i++) {
7351                if (stats_len_arr[i] == 0) {
7352                        /* skip this counter */
7353                        buf[i] = 0;
7354                        continue;
7355                }
7356                if (stats_len_arr[i] == 4) {
7357                        /* 4-byte counter */
7358                        buf[i] = (u64)
7359                                *(hw_stats + bnx2_stats_offset_arr[i]);
7360                        continue;
7361                }
7362                /* 8-byte counter */
7363                buf[i] = (((u64) *(hw_stats +
7364                                        bnx2_stats_offset_arr[i])) << 32) +
7365                                *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7366        }
7367}
7368
7369static int
7370bnx2_phys_id(struct net_device *dev, u32 data)
7371{
7372        struct bnx2 *bp = netdev_priv(dev);
7373        int i;
7374        u32 save;
7375
7376        bnx2_set_power_state(bp, PCI_D0);
7377
7378        if (data == 0)
7379                data = 2;
7380
7381        save = REG_RD(bp, BNX2_MISC_CFG);
7382        REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7383
7384        for (i = 0; i < (data * 2); i++) {
7385                if ((i % 2) == 0) {
7386                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7387                }
7388                else {
7389                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7390                                BNX2_EMAC_LED_1000MB_OVERRIDE |
7391                                BNX2_EMAC_LED_100MB_OVERRIDE |
7392                                BNX2_EMAC_LED_10MB_OVERRIDE |
7393                                BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7394                                BNX2_EMAC_LED_TRAFFIC);
7395                }
7396                msleep_interruptible(500);
7397                if (signal_pending(current))
7398                        break;
7399        }
7400        REG_WR(bp, BNX2_EMAC_LED, 0);
7401        REG_WR(bp, BNX2_MISC_CFG, save);
7402
7403        if (!netif_running(dev))
7404                bnx2_set_power_state(bp, PCI_D3hot);
7405
7406        return 0;
7407}
7408
7409static int
7410bnx2_set_tx_csum(struct net_device *dev, u32 data)
7411{
7412        struct bnx2 *bp = netdev_priv(dev);
7413
7414        if (CHIP_NUM(bp) == CHIP_NUM_5709)
7415                return (ethtool_op_set_tx_ipv6_csum(dev, data));
7416        else
7417                return (ethtool_op_set_tx_csum(dev, data));
7418}
7419
/* ethtool method table wiring the handlers above into the stack. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7450
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* MII registers are not directly accessible when the PHY
		 * is remotely controlled.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7501
7502/* Called with rtnl_lock */
7503static int
7504bnx2_change_mac_addr(struct net_device *dev, void *p)
7505{
7506        struct sockaddr *addr = p;
7507        struct bnx2 *bp = netdev_priv(dev);
7508
7509        if (!is_valid_ether_addr(addr->sa_data))
7510                return -EINVAL;
7511
7512        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7513        if (netif_running(dev))
7514                bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7515
7516        return 0;
7517}
7518
7519/* Called with rtnl_lock */
7520static int
7521bnx2_change_mtu(struct net_device *dev, int new_mtu)
7522{
7523        struct bnx2 *bp = netdev_priv(dev);
7524
7525        if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7526                ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7527                return -EINVAL;
7528
7529        dev->mtu = new_mtu;
7530        return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7531}
7532
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: service every vector with its interrupt disabled. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		unsigned int irq = bp->irq_tbl[vec].vector;

		disable_irq(irq);
		bnx2_interrupt(irq, &bp->bnx2_napi[vec]);
		enable_irq(irq);
	}
}
#endif
7547
7548static void __devinit
7549bnx2_get_5709_media(struct bnx2 *bp)
7550{
7551        u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7552        u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7553        u32 strap;
7554
7555        if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7556                return;
7557        else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7558                bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7559                return;
7560        }
7561
7562        if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7563                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7564        else
7565                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7566
7567        if (PCI_FUNC(bp->pdev->devfn) == 0) {
7568                switch (strap) {
7569                case 0x4:
7570                case 0x5:
7571                case 0x6:
7572                        bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7573                        return;
7574                }
7575        } else {
7576                switch (strap) {
7577                case 0x1:
7578                case 0x2:
7579                case 0x4:
7580                        bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7581                        return;
7582                }
7583        }
7584}
7585
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X: derive the bus speed from the detected clock. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
		/* Other encodings leave bp->bus_speed_mhz unchanged. */
	}
	else {
		/* Conventional PCI: 66 MHz when M66EN is set, else 33. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7637
7638static int __devinit
7639bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7640{
7641        struct bnx2 *bp;
7642        unsigned long mem_len;
7643        int rc, i, j;
7644        u32 reg;
7645        u64 dma_mask, persist_dma_mask;
7646
7647        SET_NETDEV_DEV(dev, &pdev->dev);
7648        bp = netdev_priv(dev);
7649
7650        bp->flags = 0;
7651        bp->phy_flags = 0;
7652
7653        /* enable device (incl. PCI PM wakeup), and bus-mastering */
7654        rc = pci_enable_device(pdev);
7655        if (rc) {
7656                dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7657                goto err_out;
7658        }
7659
7660        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7661                dev_err(&pdev->dev,
7662                        "Cannot find PCI device base address, aborting.\n");
7663                rc = -ENODEV;
7664                goto err_out_disable;
7665        }
7666
7667        rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7668        if (rc) {
7669                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7670                goto err_out_disable;
7671        }
7672
7673        pci_set_master(pdev);
7674        pci_save_state(pdev);
7675
7676        bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7677        if (bp->pm_cap == 0) {
7678                dev_err(&pdev->dev,
7679                        "Cannot find power management capability, aborting.\n");
7680                rc = -EIO;
7681                goto err_out_release;
7682        }
7683
7684        bp->dev = dev;
7685        bp->pdev = pdev;
7686
7687        spin_lock_init(&bp->phy_lock);
7688        spin_lock_init(&bp->indirect_lock);
7689#ifdef BCM_CNIC
7690        mutex_init(&bp->cnic_lock);
7691#endif
7692        INIT_WORK(&bp->reset_task, bnx2_reset_task);
7693
7694        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7695        mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7696        dev->mem_end = dev->mem_start + mem_len;
7697        dev->irq = pdev->irq;
7698
7699        bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7700
7701        if (!bp->regview) {
7702                dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7703                rc = -ENOMEM;
7704                goto err_out_release;
7705        }
7706
7707        /* Configure byte swap and enable write to the reg_window registers.
7708         * Rely on CPU to do target byte swapping on big endian systems
7709         * The chip's target access swapping will not swap all accesses
7710         */
7711        pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7712                               BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7713                               BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7714
7715        bnx2_set_power_state(bp, PCI_D0);
7716
7717        bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7718
7719        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7720                if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7721                        dev_err(&pdev->dev,
7722                                "Cannot find PCIE capability, aborting.\n");
7723                        rc = -EIO;
7724                        goto err_out_unmap;
7725                }
7726                bp->flags |= BNX2_FLAG_PCIE;
7727                if (CHIP_REV(bp) == CHIP_REV_Ax)
7728                        bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7729        } else {
7730                bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7731                if (bp->pcix_cap == 0) {
7732                        dev_err(&pdev->dev,
7733                                "Cannot find PCIX capability, aborting.\n");
7734                        rc = -EIO;
7735                        goto err_out_unmap;
7736                }
7737                bp->flags |= BNX2_FLAG_BROKEN_STATS;
7738        }
7739
7740        if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7741                if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7742                        bp->flags |= BNX2_FLAG_MSIX_CAP;
7743        }
7744
7745        if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7746                if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7747                        bp->flags |= BNX2_FLAG_MSI_CAP;
7748        }
7749
7750        /* 5708 cannot support DMA addresses > 40-bit.  */
7751        if (CHIP_NUM(bp) == CHIP_NUM_5708)
7752                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7753        else
7754                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7755
7756        /* Configure DMA attributes. */
7757        if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7758                dev->features |= NETIF_F_HIGHDMA;
7759                rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7760                if (rc) {
7761                        dev_err(&pdev->dev,
7762                                "pci_set_consistent_dma_mask failed, aborting.\n");
7763                        goto err_out_unmap;
7764                }
7765        } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7766                dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7767                goto err_out_unmap;
7768        }
7769
7770        if (!(bp->flags & BNX2_FLAG_PCIE))
7771                bnx2_get_pci_speed(bp);
7772
7773        /* 5706A0 may falsely detect SERR and PERR. */
7774        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7775                reg = REG_RD(bp, PCI_COMMAND);
7776                reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7777                REG_WR(bp, PCI_COMMAND, reg);
7778        }
7779        else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7780                !(bp->flags & BNX2_FLAG_PCIX)) {
7781
7782                dev_err(&pdev->dev,
7783                        "5706 A1 can only be used in a PCIX bus, aborting.\n");
7784                goto err_out_unmap;
7785        }
7786
7787        bnx2_init_nvram(bp);
7788
7789        reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7790
7791        if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7792            BNX2_SHM_HDR_SIGNATURE_SIG) {
7793                u32 off = PCI_FUNC(pdev->devfn) << 2;
7794
7795                bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7796        } else
7797                bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7798
7799        /* Get the permanent MAC address.  First we need to make sure the
7800         * firmware is actually running.
7801         */
7802        reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7803
7804        if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7805            BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7806                dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7807                rc = -ENODEV;
7808                goto err_out_unmap;
7809        }
7810
7811        reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7812        for (i = 0, j = 0; i < 3; i++) {
7813                u8 num, k, skip0;
7814
7815                num = (u8) (reg >> (24 - (i * 8)));
7816                for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7817                        if (num >= k || !skip0 || k == 1) {
7818                                bp->fw_version[j++] = (num / k) + '0';
7819                                skip0 = 0;
7820                        }
7821                }
7822                if (i != 2)
7823                        bp->fw_version[j++] = '.';
7824        }
7825        reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7826        if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7827                bp->wol = 1;
7828
7829        if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7830                bp->flags |= BNX2_FLAG_ASF_ENABLE;
7831
7832                for (i = 0; i < 30; i++) {
7833                        reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7834                        if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7835                                break;
7836                        msleep(10);
7837                }
7838        }
7839        reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7840        reg &= BNX2_CONDITION_MFW_RUN_MASK;
7841        if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7842            reg != BNX2_CONDITION_MFW_RUN_NONE) {
7843                u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7844
7845                bp->fw_version[j++] = ' ';
7846                for (i = 0; i < 3; i++) {
7847                        reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7848                        reg = swab32(reg);
7849                        memcpy(&bp->fw_version[j], &reg, 4);
7850                        j += 4;
7851                }
7852        }
7853
7854        reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7855        bp->mac_addr[0] = (u8) (reg >> 8);
7856        bp->mac_addr[1] = (u8) reg;
7857
7858        reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7859        bp->mac_addr[2] = (u8) (reg >> 24);
7860        bp->mac_addr[3] = (u8) (reg >> 16);
7861        bp->mac_addr[4] = (u8) (reg >> 8);
7862        bp->mac_addr[5] = (u8) reg;
7863
7864        bp->tx_ring_size = MAX_TX_DESC_CNT;
7865        bnx2_set_rx_ring_size(bp, 255);
7866
7867        bp->rx_csum = 1;
7868
7869        bp->tx_quick_cons_trip_int = 2;
7870        bp->tx_quick_cons_trip = 20;
7871        bp->tx_ticks_int = 18;
7872        bp->tx_ticks = 80;
7873
7874        bp->rx_quick_cons_trip_int = 2;
7875        bp->rx_quick_cons_trip = 12;
7876        bp->rx_ticks_int = 18;
7877        bp->rx_ticks = 18;
7878
7879        bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7880
7881        bp->current_interval = BNX2_TIMER_INTERVAL;
7882
7883        bp->phy_addr = 1;
7884
7885        /* Disable WOL support if we are running on a SERDES chip. */
7886        if (CHIP_NUM(bp) == CHIP_NUM_5709)
7887                bnx2_get_5709_media(bp);
7888        else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7889                bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7890
7891        bp->phy_port = PORT_TP;
7892        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7893                bp->phy_port = PORT_FIBRE;
7894                reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7895                if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7896                        bp->flags |= BNX2_FLAG_NO_WOL;
7897                        bp->wol = 0;
7898                }
7899                if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7900                        /* Don't do parallel detect on this board because of
7901                         * some board problems.  The link will not go down
7902                         * if we do parallel detect.
7903                         */
7904                        if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7905                            pdev->subsystem_device == 0x310c)
7906                                bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7907                } else {
7908                        bp->phy_addr = 2;
7909                        if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7910                                bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7911                }
7912        } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7913                   CHIP_NUM(bp) == CHIP_NUM_5708)
7914                bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7915        else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7916                 (CHIP_REV(bp) == CHIP_REV_Ax ||
7917                  CHIP_REV(bp) == CHIP_REV_Bx))
7918                bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7919
7920        bnx2_init_fw_cap(bp);
7921
7922        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7923            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7924            (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7925            !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7926                bp->flags |= BNX2_FLAG_NO_WOL;
7927                bp->wol = 0;
7928        }
7929
7930        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7931                bp->tx_quick_cons_trip_int =
7932                        bp->tx_quick_cons_trip;
7933                bp->tx_ticks_int = bp->tx_ticks;
7934                bp->rx_quick_cons_trip_int =
7935                        bp->rx_quick_cons_trip;
7936                bp->rx_ticks_int = bp->rx_ticks;
7937                bp->comp_prod_trip_int = bp->comp_prod_trip;
7938                bp->com_ticks_int = bp->com_ticks;
7939                bp->cmd_ticks_int = bp->cmd_ticks;
7940        }
7941
7942        /* Disable MSI on 5706 if AMD 8132 bridge is found.
7943         *
7944         * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7945         * with byte enables disabled on the unused 32-bit word.  This is legal
7946         * but causes problems on the AMD 8132 which will eventually stop
7947         * responding after a while.
7948         *
7949         * AMD believes this incompatibility is unique to the 5706, and
7950         * prefers to locally disable MSI rather than globally disabling it.
7951         */
7952        if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7953                struct pci_dev *amd_8132 = NULL;
7954
7955                while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7956                                                  PCI_DEVICE_ID_AMD_8132_BRIDGE,
7957                                                  amd_8132))) {
7958
7959                        if (amd_8132->revision >= 0x10 &&
7960                            amd_8132->revision <= 0x13) {
7961                                disable_msi = 1;
7962                                pci_dev_put(amd_8132);
7963                                break;
7964                        }
7965                }
7966        }
7967
7968        bnx2_set_default_link(bp);
7969        bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7970
7971        init_timer(&bp->timer);
7972        bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7973        bp->timer.data = (unsigned long) bp;
7974        bp->timer.function = bnx2_timer;
7975
7976        return 0;
7977
7978err_out_unmap:
7979        if (bp->regview) {
7980                iounmap(bp->regview);
7981                bp->regview = NULL;
7982        }
7983
7984err_out_release:
7985        pci_release_regions(pdev);
7986
7987err_out_disable:
7988        pci_disable_device(pdev);
7989        pci_set_drvdata(pdev, NULL);
7990
7991err_out:
7992        return rc;
7993}
7994
7995static char * __devinit
7996bnx2_bus_string(struct bnx2 *bp, char *str)
7997{
7998        char *s = str;
7999
8000        if (bp->flags & BNX2_FLAG_PCIE) {
8001                s += sprintf(s, "PCI Express");
8002        } else {
8003                s += sprintf(s, "PCI");
8004                if (bp->flags & BNX2_FLAG_PCIX)
8005                        s += sprintf(s, "-X");
8006                if (bp->flags & BNX2_FLAG_PCI_32BIT)
8007                        s += sprintf(s, " 32-bit");
8008                else
8009                        s += sprintf(s, " 64-bit");
8010                s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8011        }
8012        return str;
8013}
8014
8015static void __devinit
8016bnx2_init_napi(struct bnx2 *bp)
8017{
8018        int i;
8019
8020        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
8021                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8022                int (*poll)(struct napi_struct *, int);
8023
8024                if (i == 0)
8025                        poll = bnx2_poll;
8026                else
8027                        poll = bnx2_poll_msix;
8028
8029                netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8030                bnapi->bp = bp;
8031        }
8032}
8033
/* net_device callbacks wired into the networking core at probe time. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8052
/* Propagate feature @flags to dev->vlan_features so that hardware
 * offloads also apply to VLAN devices stacked on top of this one.
 * Compiles to a no-op when VLAN support is not configured.
 * (Also normalizes the declaration-specifier order to the conventional
 * "static inline void".)
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8059
/* Probe callback: allocate the multi-queue net_device, run board init,
 * request firmware, advertise offload features and register with the
 * network stack.  Returns 0 on success or a negative errno with all
 * intermediate state unwound.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only on the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board() cleaned up after itself on failure. */
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* MAC address was read from shared memory in bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	/* IPv6 checksum offload and TSO6 are only enabled on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %pM\n",
		dev->name,
		board_info[ent->driver_data].name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Undo bnx2_init_board() and firmware loading. */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8146
/* Device removal: flush deferred work, unregister the netdev, release
 * firmware blobs, unmap the register window and hand the PCI device
 * back to the core.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Ensure no queued reset_task runs against a vanishing device. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8170
/* Power-management suspend hook: save PCI config space, and if the
 * interface is up, quiesce traffic, stop the maintenance timer, reset
 * the chip, free pending skbs and enter the requested PCI power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Make sure a queued reset_task does not race with suspend. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8194
/* Power-management resume hook: restore PCI config space and, if the
 * interface was up, return the chip to D0 and re-initialize the NIC.
 *
 * NOTE(review): the return value of bnx2_init_nic() is ignored here; a
 * failed re-init would leave the device attached but non-functional —
 * confirm whether this is acceptable or should propagate an error.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
8211
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It detaches the netdev, quiesces
 * the chip if it was running, and disables the PCI device so the
 * error-recovery core can proceed with a slot reset.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* Permanent failure: nothing further can be recovered. */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8246
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot: re-enable the
 * PCI device, restore config space, and re-initialize the NIC if the
 * interface was running when the error occurred.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	/* Config space was saved in bnx2_init_board()/bnx2_suspend(). */
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
8276
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * Invoked by the PCI error-recovery core once recovery is complete:
 * restarts the NIC if the interface was up and re-attaches the netdev.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	rtnl_lock();

	if (netif_running(dev))
		bnx2_netif_start(netdev_priv(dev));

	netif_device_attach(dev);

	rtnl_unlock();
}
8296
/* PCI error-recovery callbacks (AER slot-reset flow). */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8302
/* PCI driver descriptor: ties the ID table, probe/remove and PM hooks
 * together for registration with the PCI core.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8312
/* Module init: register the driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8317
/* Module exit: unregister the driver; the PCI core removes all devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8322
/* Hook the module entry and exit points into the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8325
8326
8327
8328