linux/drivers/net/ethernet/qlogic/qla3xxx.c
   1/*
   2 * QLogic QLA3xxx NIC HBA Driver
   3 * Copyright (c)  2003-2006 QLogic Corporation
   4 *
   5 * See LICENSE.qla3xxx for copyright and licensing details.
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/kernel.h>
  11#include <linux/init.h>
  12#include <linux/types.h>
  13#include <linux/module.h>
  14#include <linux/list.h>
  15#include <linux/pci.h>
  16#include <linux/dma-mapping.h>
  17#include <linux/sched.h>
  18#include <linux/slab.h>
  19#include <linux/dmapool.h>
  20#include <linux/mempool.h>
  21#include <linux/spinlock.h>
  22#include <linux/kthread.h>
  23#include <linux/interrupt.h>
  24#include <linux/errno.h>
  25#include <linux/ioport.h>
  26#include <linux/ip.h>
  27#include <linux/in.h>
  28#include <linux/if_arp.h>
  29#include <linux/if_ether.h>
  30#include <linux/netdevice.h>
  31#include <linux/etherdevice.h>
  32#include <linux/ethtool.h>
  33#include <linux/skbuff.h>
  34#include <linux/rtnetlink.h>
  35#include <linux/if_vlan.h>
  36#include <linux/delay.h>
  37#include <linux/mm.h>
  38#include <linux/prefetch.h>
  39
  40#include "qla3xxx.h"
  41
  42#define DRV_NAME        "qla3xxx"
  43#define DRV_STRING      "QLogic ISP3XXX Network Driver"
  44#define DRV_VERSION     "v2.03.00-k5"
  45
  46static const char ql3xxx_driver_name[] = DRV_NAME;
  47static const char ql3xxx_driver_version[] = DRV_VERSION;
  48
  49#define TIMED_OUT_MSG                                                   \
  50"Timed out waiting for management port to get free before issuing command\n"
  51
  52MODULE_AUTHOR("QLogic Corporation");
  53MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
  54MODULE_LICENSE("GPL");
  55MODULE_VERSION(DRV_VERSION);
  56
  57static const u32 default_msg
  58    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
  59    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
  60
  61static int debug = -1;          /* defaults above */
  62module_param(debug, int, 0);
  63MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  64
  65static int msi;
  66module_param(msi, int, 0);
  67MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
  68
  69static const struct pci_device_id ql3xxx_pci_tbl[] = {
  70        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
  71        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
  72        /* required last entry */
  73        {0,}
  74};
  75
  76MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
  77
  78/*
   79 *  These are the known PHYs that are used
  80 */
  81enum PHY_DEVICE_TYPE {
  82   PHY_TYPE_UNKNOWN   = 0,
  83   PHY_VITESSE_VSC8211,
  84   PHY_AGERE_ET1011C,
  85   MAX_PHY_DEV_TYPES
  86};
  87
  88struct PHY_DEVICE_INFO {
  89        const enum PHY_DEVICE_TYPE      phyDevice;
  90        const u32               phyIdOUI;
  91        const u16               phyIdModel;
  92        const char              *name;
  93};
  94
  95static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
  96        {PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
  97        {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
  98        {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
  99};
 100
 101
 102/*
 103 * Caller must take hw_lock.
 104 */
 105static int ql_sem_spinlock(struct ql3_adapter *qdev,
 106                            u32 sem_mask, u32 sem_bits)
 107{
 108        struct ql3xxx_port_registers __iomem *port_regs =
 109                qdev->mem_map_registers;
 110        u32 value;
 111        unsigned int seconds = 3;
 112
 113        do {
 114                writel((sem_mask | sem_bits),
 115                       &port_regs->CommonRegs.semaphoreReg);
 116                value = readl(&port_regs->CommonRegs.semaphoreReg);
 117                if ((value & (sem_mask >> 16)) == sem_bits)
 118                        return 0;
 119                ssleep(1);
 120        } while (--seconds);
 121        return -1;
 122}
 123
 124static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
 125{
 126        struct ql3xxx_port_registers __iomem *port_regs =
 127                qdev->mem_map_registers;
 128        writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
 129        readl(&port_regs->CommonRegs.semaphoreReg);
 130}
 131
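/*
 * Single-attempt, non-blocking variant of ql_sem_spinlock(); returns
 * non-zero if the semaphore was acquired.
 */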
 132static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
 133{
 134        struct ql3xxx_port_registers __iomem *port_regs =
 135                qdev->mem_map_registers;
 136        u32 value;
 137
 138        writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
 139        value = readl(&port_regs->CommonRegs.semaphoreReg);
 140        return ((value & (sem_mask >> 16)) == sem_bits);
 141}
 142
 143/*
 144 * Caller holds hw_lock.
 145 */
 146static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
 147{
 148        int i = 0;
 149
 150        while (i < 10) {
 151                if (i)
 152                        ssleep(1);
 153
 154                if (ql_sem_lock(qdev,
 155                                QL_DRVR_SEM_MASK,
 156                                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
 157                                 * 2) << 1)) {
 158                        netdev_printk(KERN_DEBUG, qdev->ndev,
 159                                      "driver lock acquired\n");
 160                        return 1;
 161                }
 162        }
 163
 164        netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
 165        return 0;
 166}
 167
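/*
 * The ISP's register space is paged; select the active page through
 * ispControlStatus and cache it so the page0/1/2 accessors below can
 * skip redundant switches.
 */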
 168static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
 169{
 170        struct ql3xxx_port_registers __iomem *port_regs =
 171                qdev->mem_map_registers;
 172
 173        writel(((ISP_CONTROL_NP_MASK << 16) | page),
 174                        &port_regs->CommonRegs.ispControlStatus);
 175        readl(&port_regs->CommonRegs.ispControlStatus);
 176        qdev->current_page = page;
 177}
 178
 179static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 180{
 181        u32 value;
 182        unsigned long hw_flags;
 183
 184        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 185        value = readl(reg);
 186        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 187
 188        return value;
 189}
 190
 191static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
 192{
 193        return readl(reg);
 194}
 195
 196static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 197{
 198        u32 value;
 199        unsigned long hw_flags;
 200
 201        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 202
 203        if (qdev->current_page != 0)
 204                ql_set_register_page(qdev, 0);
 205        value = readl(reg);
 206
 207        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 208        return value;
 209}
 210
 211static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
 212{
 213        if (qdev->current_page != 0)
 214                ql_set_register_page(qdev, 0);
 215        return readl(reg);
 216}
 217
 218static void ql_write_common_reg_l(struct ql3_adapter *qdev,
 219                                u32 __iomem *reg, u32 value)
 220{
 221        unsigned long hw_flags;
 222
 223        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 224        writel(value, reg);
 225        readl(reg);
 226        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 227}
 228
 229static void ql_write_common_reg(struct ql3_adapter *qdev,
 230                                u32 __iomem *reg, u32 value)
 231{
 232        writel(value, reg);
 233        readl(reg);
 234}
 235
 236static void ql_write_nvram_reg(struct ql3_adapter *qdev,
 237                                u32 __iomem *reg, u32 value)
 238{
 239        writel(value, reg);
 240        readl(reg);
 241        udelay(1);
 242}
 243
 244static void ql_write_page0_reg(struct ql3_adapter *qdev,
 245                               u32 __iomem *reg, u32 value)
 246{
 247        if (qdev->current_page != 0)
 248                ql_set_register_page(qdev, 0);
 249        writel(value, reg);
 250        readl(reg);
 251}
 252
 253/*
 254 * Caller holds hw_lock. Only called during init.
 255 */
 256static void ql_write_page1_reg(struct ql3_adapter *qdev,
 257                               u32 __iomem *reg, u32 value)
 258{
 259        if (qdev->current_page != 1)
 260                ql_set_register_page(qdev, 1);
 261        writel(value, reg);
 262        readl(reg);
 263}
 264
 265/*
 266 * Caller holds hw_lock. Only called during init.
 267 */
 268static void ql_write_page2_reg(struct ql3_adapter *qdev,
 269                               u32 __iomem *reg, u32 value)
 270{
 271        if (qdev->current_page != 2)
 272                ql_set_register_page(qdev, 2);
 273        writel(value, reg);
 274        readl(reg);
 275}
 276
 277static void ql_disable_interrupts(struct ql3_adapter *qdev)
 278{
 279        struct ql3xxx_port_registers __iomem *port_regs =
 280                qdev->mem_map_registers;
 281
 282        ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
 283                            (ISP_IMR_ENABLE_INT << 16));
 284
 285}
 286
 287static void ql_enable_interrupts(struct ql3_adapter *qdev)
 288{
 289        struct ql3xxx_port_registers __iomem *port_regs =
 290                qdev->mem_map_registers;
 291
 292        ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
 293                            ((0xff << 16) | ISP_IMR_ENABLE_INT));
 294
 295}
 296
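/*
 * Return a receive buffer control block to the large-buffer free list,
 * allocating and DMA-mapping a fresh skb if the old one was handed up
 * the stack.
 */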
 297static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 298                                            struct ql_rcv_buf_cb *lrg_buf_cb)
 299{
 300        dma_addr_t map;
 301        int err;
 302        lrg_buf_cb->next = NULL;
 303
 304        if (qdev->lrg_buf_free_tail == NULL) {  /* The list is empty  */
 305                qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
 306        } else {
 307                qdev->lrg_buf_free_tail->next = lrg_buf_cb;
 308                qdev->lrg_buf_free_tail = lrg_buf_cb;
 309        }
 310
 311        if (!lrg_buf_cb->skb) {
 312                lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
 313                                                   qdev->lrg_buffer_len);
 314                if (unlikely(!lrg_buf_cb->skb)) {
 315                        qdev->lrg_buf_skb_check++;
 316                } else {
 317                        /*
   318                         * We save some space to copy the ethhdr from the first
 319                         * buffer
 320                         */
 321                        skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
 322                        map = pci_map_single(qdev->pdev,
 323                                             lrg_buf_cb->skb->data,
 324                                             qdev->lrg_buffer_len -
 325                                             QL_HEADER_SPACE,
 326                                             PCI_DMA_FROMDEVICE);
 327                        err = pci_dma_mapping_error(qdev->pdev, map);
 328                        if (err) {
 329                                netdev_err(qdev->ndev,
 330                                           "PCI mapping failed with error: %d\n",
 331                                           err);
 332                                dev_kfree_skb(lrg_buf_cb->skb);
 333                                lrg_buf_cb->skb = NULL;
 334
 335                                qdev->lrg_buf_skb_check++;
 336                                return;
 337                        }
 338
 339                        lrg_buf_cb->buf_phy_addr_low =
 340                            cpu_to_le32(LS_64BITS(map));
 341                        lrg_buf_cb->buf_phy_addr_high =
 342                            cpu_to_le32(MS_64BITS(map));
 343                        dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
 344                        dma_unmap_len_set(lrg_buf_cb, maplen,
 345                                          qdev->lrg_buffer_len -
 346                                          QL_HEADER_SPACE);
 347                }
 348        }
 349
 350        qdev->lrg_buf_free_count++;
 351}
 352
 353static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
 354                                                           *qdev)
 355{
 356        struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
 357
 358        if (lrg_buf_cb != NULL) {
 359                qdev->lrg_buf_free_head = lrg_buf_cb->next;
 360                if (qdev->lrg_buf_free_head == NULL)
 361                        qdev->lrg_buf_free_tail = NULL;
 362                qdev->lrg_buf_free_count--;
 363        }
 364
 365        return lrg_buf_cb;
 366}
 367
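/*
 * NVRAM access: the FM93C56A serial EEPROM is bit-banged through
 * serialPortInterfaceReg, clocking out the command/address bits and
 * clocking in the data bits one at a time.
 */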
 368static u32 addrBits = EEPROM_NO_ADDR_BITS;
 369static u32 dataBits = EEPROM_NO_DATA_BITS;
 370
 371static void fm93c56a_deselect(struct ql3_adapter *qdev);
 372static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
 373                            unsigned short *value);
 374
 375/*
 376 * Caller holds hw_lock.
 377 */
 378static void fm93c56a_select(struct ql3_adapter *qdev)
 379{
 380        struct ql3xxx_port_registers __iomem *port_regs =
 381                        qdev->mem_map_registers;
 382        __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 383
 384        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
 385        ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
 386        ql_write_nvram_reg(qdev, spir,
 387                           ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
 388}
 389
 390/*
 391 * Caller holds hw_lock.
 392 */
 393static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
 394{
 395        int i;
 396        u32 mask;
 397        u32 dataBit;
 398        u32 previousBit;
 399        struct ql3xxx_port_registers __iomem *port_regs =
 400                        qdev->mem_map_registers;
 401        __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 402
 403        /* Clock in a zero, then do the start bit */
 404        ql_write_nvram_reg(qdev, spir,
 405                           (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 406                            AUBURN_EEPROM_DO_1));
 407        ql_write_nvram_reg(qdev, spir,
 408                           (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 409                            AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
 410        ql_write_nvram_reg(qdev, spir,
 411                           (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 412                            AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
 413
 414        mask = 1 << (FM93C56A_CMD_BITS - 1);
 415        /* Force the previous data bit to be different */
 416        previousBit = 0xffff;
 417        for (i = 0; i < FM93C56A_CMD_BITS; i++) {
 418                dataBit = (cmd & mask)
 419                        ? AUBURN_EEPROM_DO_1
 420                        : AUBURN_EEPROM_DO_0;
 421                if (previousBit != dataBit) {
 422                        /* If the bit changed, change the DO state to match */
 423                        ql_write_nvram_reg(qdev, spir,
 424                                           (ISP_NVRAM_MASK |
 425                                            qdev->eeprom_cmd_data | dataBit));
 426                        previousBit = dataBit;
 427                }
 428                ql_write_nvram_reg(qdev, spir,
 429                                   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 430                                    dataBit | AUBURN_EEPROM_CLK_RISE));
 431                ql_write_nvram_reg(qdev, spir,
 432                                   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 433                                    dataBit | AUBURN_EEPROM_CLK_FALL));
 434                cmd = cmd << 1;
 435        }
 436
 437        mask = 1 << (addrBits - 1);
 438        /* Force the previous data bit to be different */
 439        previousBit = 0xffff;
 440        for (i = 0; i < addrBits; i++) {
 441                dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
 442                        : AUBURN_EEPROM_DO_0;
 443                if (previousBit != dataBit) {
 444                        /*
 445                         * If the bit changed, then change the DO state to
 446                         * match
 447                         */
 448                        ql_write_nvram_reg(qdev, spir,
 449                                           (ISP_NVRAM_MASK |
 450                                            qdev->eeprom_cmd_data | dataBit));
 451                        previousBit = dataBit;
 452                }
 453                ql_write_nvram_reg(qdev, spir,
 454                                   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 455                                    dataBit | AUBURN_EEPROM_CLK_RISE));
 456                ql_write_nvram_reg(qdev, spir,
 457                                   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 458                                    dataBit | AUBURN_EEPROM_CLK_FALL));
 459                eepromAddr = eepromAddr << 1;
 460        }
 461}
 462
 463/*
 464 * Caller holds hw_lock.
 465 */
 466static void fm93c56a_deselect(struct ql3_adapter *qdev)
 467{
 468        struct ql3xxx_port_registers __iomem *port_regs =
 469                        qdev->mem_map_registers;
 470        __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 471
 472        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
 473        ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
 474}
 475
 476/*
 477 * Caller holds hw_lock.
 478 */
 479static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
 480{
 481        int i;
 482        u32 data = 0;
 483        u32 dataBit;
 484        struct ql3xxx_port_registers __iomem *port_regs =
 485                        qdev->mem_map_registers;
 486        __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 487
 488        /* Read the data bits */
 489        /* The first bit is a dummy.  Clock right over it. */
 490        for (i = 0; i < dataBits; i++) {
 491                ql_write_nvram_reg(qdev, spir,
 492                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 493                                   AUBURN_EEPROM_CLK_RISE);
 494                ql_write_nvram_reg(qdev, spir,
 495                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 496                                   AUBURN_EEPROM_CLK_FALL);
 497                dataBit = (ql_read_common_reg(qdev, spir) &
 498                           AUBURN_EEPROM_DI_1) ? 1 : 0;
 499                data = (data << 1) | dataBit;
 500        }
 501        *value = (u16)data;
 502}
 503
 504/*
 505 * Caller holds hw_lock.
 506 */
 507static void eeprom_readword(struct ql3_adapter *qdev,
 508                            u32 eepromAddr, unsigned short *value)
 509{
 510        fm93c56a_select(qdev);
 511        fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
 512        fm93c56a_datain(qdev, value);
 513        fm93c56a_deselect(qdev);
 514}
 515
 516static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
 517{
 518        __le16 *p = (__le16 *)ndev->dev_addr;
 519        p[0] = cpu_to_le16(addr[0]);
 520        p[1] = cpu_to_le16(addr[1]);
 521        p[2] = cpu_to_le16(addr[2]);
 522}
 523
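/*
 * Read the entire EEPROM image into qdev->nvram_data and verify that
 * the 16-bit words sum to zero.  Returns 0 on success, -1 on failure.
 */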
 524static int ql_get_nvram_params(struct ql3_adapter *qdev)
 525{
 526        u16 *pEEPROMData;
 527        u16 checksum = 0;
 528        u32 index;
 529        unsigned long hw_flags;
 530
 531        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 532
 533        pEEPROMData = (u16 *)&qdev->nvram_data;
 534        qdev->eeprom_cmd_data = 0;
 535        if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
 536                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
 537                         2) << 10)) {
 538                pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
 539                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 540                return -1;
 541        }
 542
 543        for (index = 0; index < EEPROM_SIZE; index++) {
 544                eeprom_readword(qdev, index, pEEPROMData);
 545                checksum += *pEEPROMData;
 546                pEEPROMData++;
 547        }
 548        ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
 549
 550        if (checksum != 0) {
 551                netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
 552                           checksum);
 553                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 554                return -1;
 555        }
 556
 557        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 558        return checksum;
 559}
 560
 561static const u32 PHYAddr[2] = {
 562        PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
 563};
 564
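/*
 * Poll the MII management status register until the busy bit clears,
 * giving up (returning -1) after roughly 10 ms (1000 iterations of
 * udelay(10)).
 */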
 565static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
 566{
 567        struct ql3xxx_port_registers __iomem *port_regs =
 568                        qdev->mem_map_registers;
 569        u32 temp;
 570        int count = 1000;
 571
 572        while (count) {
 573                temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
 574                if (!(temp & MAC_MII_STATUS_BSY))
 575                        return 0;
 576                udelay(10);
 577                count--;
 578        }
 579        return -1;
 580}
 581
 582static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
 583{
 584        struct ql3xxx_port_registers __iomem *port_regs =
 585                        qdev->mem_map_registers;
 586        u32 scanControl;
 587
 588        if (qdev->numPorts > 1) {
 589                /* Auto scan will cycle through multiple ports */
 590                scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
 591        } else {
 592                scanControl = MAC_MII_CONTROL_SC;
 593        }
 594
 595        /*
  596         * Scan register 1 of the PHY/PETBI and set up to scan
  597         * both devices.  The autoscan starts from the first
  598         * register and completes the last one before rolling
  599         * over to the first.
 600         */
 601        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 602                           PHYAddr[0] | MII_SCAN_REGISTER);
 603
 604        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 605                           (scanControl) |
 606                           ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
 607}
 608
 609static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
 610{
 611        u8 ret;
 612        struct ql3xxx_port_registers __iomem *port_regs =
 613                                        qdev->mem_map_registers;
 614
 615        /* See if scan mode is enabled before we turn it off */
 616        if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
 617            (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
 618                /* Scan is enabled */
 619                ret = 1;
 620        } else {
 621                /* Scan is disabled */
 622                ret = 0;
 623        }
 624
 625        /*
 626         * When disabling scan mode you must first change the MII register
 627         * address
 628         */
 629        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 630                           PHYAddr[0] | MII_SCAN_REGISTER);
 631
 632        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 633                           ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
 634                             MAC_MII_CONTROL_RC) << 16));
 635
 636        return ret;
 637}
 638
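/*
 * The *_ex MII accessors take an explicit PHY address rather than using
 * qdev->PHYAddr; scan mode is suspended for the duration of the access
 * and restored afterwards if it was previously enabled.
 */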
 639static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
 640                               u16 regAddr, u16 value, u32 phyAddr)
 641{
 642        struct ql3xxx_port_registers __iomem *port_regs =
 643                        qdev->mem_map_registers;
 644        u8 scanWasEnabled;
 645
 646        scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 647
 648        if (ql_wait_for_mii_ready(qdev)) {
 649                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 650                return -1;
 651        }
 652
 653        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 654                           phyAddr | regAddr);
 655
 656        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
 657
 658        /* Wait for write to complete 9/10/04 SJP */
 659        if (ql_wait_for_mii_ready(qdev)) {
 660                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 661                return -1;
 662        }
 663
 664        if (scanWasEnabled)
 665                ql_mii_enable_scan_mode(qdev);
 666
 667        return 0;
 668}
 669
 670static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
 671                              u16 *value, u32 phyAddr)
 672{
 673        struct ql3xxx_port_registers __iomem *port_regs =
 674                        qdev->mem_map_registers;
 675        u8 scanWasEnabled;
 676        u32 temp;
 677
 678        scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 679
 680        if (ql_wait_for_mii_ready(qdev)) {
 681                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 682                return -1;
 683        }
 684
 685        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 686                           phyAddr | regAddr);
 687
 688        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 689                           (MAC_MII_CONTROL_RC << 16));
 690
 691        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 692                           (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
 693
 694        /* Wait for the read to complete */
 695        if (ql_wait_for_mii_ready(qdev)) {
 696                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 697                return -1;
 698        }
 699
 700        temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
 701        *value = (u16) temp;
 702
 703        if (scanWasEnabled)
 704                ql_mii_enable_scan_mode(qdev);
 705
 706        return 0;
 707}
 708
 709static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
 710{
 711        struct ql3xxx_port_registers __iomem *port_regs =
 712                        qdev->mem_map_registers;
 713
 714        ql_mii_disable_scan_mode(qdev);
 715
 716        if (ql_wait_for_mii_ready(qdev)) {
 717                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 718                return -1;
 719        }
 720
 721        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 722                           qdev->PHYAddr | regAddr);
 723
 724        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
 725
 726        /* Wait for write to complete. */
 727        if (ql_wait_for_mii_ready(qdev)) {
 728                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 729                return -1;
 730        }
 731
 732        ql_mii_enable_scan_mode(qdev);
 733
 734        return 0;
 735}
 736
 737static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
 738{
 739        u32 temp;
 740        struct ql3xxx_port_registers __iomem *port_regs =
 741                        qdev->mem_map_registers;
 742
 743        ql_mii_disable_scan_mode(qdev);
 744
 745        if (ql_wait_for_mii_ready(qdev)) {
 746                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 747                return -1;
 748        }
 749
 750        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 751                           qdev->PHYAddr | regAddr);
 752
 753        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 754                           (MAC_MII_CONTROL_RC << 16));
 755
 756        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 757                           (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
 758
 759        /* Wait for the read to complete */
 760        if (ql_wait_for_mii_ready(qdev)) {
 761                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 762                return -1;
 763        }
 764
 765        temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
 766        *value = (u16) temp;
 767
 768        ql_mii_enable_scan_mode(qdev);
 769
 770        return 0;
 771}
 772
 773static void ql_petbi_reset(struct ql3_adapter *qdev)
 774{
 775        ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
 776}
 777
 778static void ql_petbi_start_neg(struct ql3_adapter *qdev)
 779{
 780        u16 reg;
 781
 782        /* Enable Auto-negotiation sense */
 783        ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
 784        reg |= PETBI_TBI_AUTO_SENSE;
 785        ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
 786
 787        ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
 788                         PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
 789
 790        ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
 791                         PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
 792                         PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
 793
 794}
 795
 796static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
 797{
 798        ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
 799                            PHYAddr[qdev->mac_index]);
 800}
 801
 802static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
 803{
 804        u16 reg;
 805
 806        /* Enable Auto-negotiation sense */
 807        ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
 808                           PHYAddr[qdev->mac_index]);
 809        reg |= PETBI_TBI_AUTO_SENSE;
 810        ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
 811                            PHYAddr[qdev->mac_index]);
 812
 813        ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
 814                            PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
 815                            PHYAddr[qdev->mac_index]);
 816
 817        ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
 818                            PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
 819                            PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
 820                            PHYAddr[qdev->mac_index]);
 821}
 822
 823static void ql_petbi_init(struct ql3_adapter *qdev)
 824{
 825        ql_petbi_reset(qdev);
 826        ql_petbi_start_neg(qdev);
 827}
 828
 829static void ql_petbi_init_ex(struct ql3_adapter *qdev)
 830{
 831        ql_petbi_reset_ex(qdev);
 832        ql_petbi_start_neg_ex(qdev);
 833}
 834
 835static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
 836{
 837        u16 reg;
 838
 839        if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
 840                return 0;
 841
 842        return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
 843}
 844
 845static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
 846{
 847        netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
 848        /* power down device bit 11 = 1 */
 849        ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
 850        /* enable diagnostic mode bit 2 = 1 */
 851        ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
 852        /* 1000MB amplitude adjust (see Agere errata) */
 853        ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
 854        /* 1000MB amplitude adjust (see Agere errata) */
 855        ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
 856        /* 100MB amplitude adjust (see Agere errata) */
 857        ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
 858        /* 100MB amplitude adjust (see Agere errata) */
 859        ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
 860        /* 10MB amplitude adjust (see Agere errata) */
 861        ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
 862        /* 10MB amplitude adjust (see Agere errata) */
 863        ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
 864        /* point to hidden reg 0x2806 */
 865        ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
 866        /* Write new PHYAD w/bit 5 set */
 867        ql_mii_write_reg_ex(qdev, 0x11,
 868                            0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
 869        /*
 870         * Disable diagnostic mode bit 2 = 0
 871         * Power up device bit 11 = 0
 872         * Link up (on) and activity (blink)
 873         */
 874        ql_mii_write_reg(qdev, 0x12, 0x840a);
 875        ql_mii_write_reg(qdev, 0x00, 0x1140);
 876        ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
 877}
 878
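/*
 * Decode the OUI and model number from the two PHY ID registers and
 * look the result up in the PHY_DEVICES[] table.
 */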
 879static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
 880                                       u16 phyIdReg0, u16 phyIdReg1)
 881{
 882        enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
 883        u32   oui;
 884        u16   model;
 885        int i;
 886
 887        if (phyIdReg0 == 0xffff)
 888                return result;
 889
 890        if (phyIdReg1 == 0xffff)
 891                return result;
 892
 893        /* oui is split between two registers */
 894        oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
 895
 896        model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
 897
 898        /* Scan table for this PHY */
 899        for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
 900                if ((oui == PHY_DEVICES[i].phyIdOUI) &&
 901                    (model == PHY_DEVICES[i].phyIdModel)) {
 902                        netdev_info(qdev->ndev, "Phy: %s\n",
 903                                    PHY_DEVICES[i].name);
 904                        result = PHY_DEVICES[i].phyDevice;
 905                        break;
 906                }
 907        }
 908
 909        return result;
 910}
 911
 912static int ql_phy_get_speed(struct ql3_adapter *qdev)
 913{
 914        u16 reg;
 915
 916        switch (qdev->phyType) {
 917        case PHY_AGERE_ET1011C: {
 918                if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
 919                        return 0;
 920
 921                reg = (reg >> 8) & 3;
 922                break;
 923        }
 924        default:
 925                if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
 926                        return 0;
 927
 928                reg = (((reg & 0x18) >> 3) & 3);
 929        }
 930
 931        switch (reg) {
 932        case 2:
 933                return SPEED_1000;
 934        case 1:
 935                return SPEED_100;
 936        case 0:
 937                return SPEED_10;
 938        default:
 939                return -1;
 940        }
 941}
 942
 943static int ql_is_full_dup(struct ql3_adapter *qdev)
 944{
 945        u16 reg;
 946
 947        switch (qdev->phyType) {
 948        case PHY_AGERE_ET1011C: {
 949                if (ql_mii_read_reg(qdev, 0x1A, &reg))
 950                        return 0;
 951
 952                return ((reg & 0x0080) && (reg & 0x1000)) != 0;
 953        }
 954        case PHY_VITESSE_VSC8211:
 955        default: {
 956                if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
 957                        return 0;
 958                return (reg & PHY_AUX_DUPLEX_STAT) != 0;
 959        }
 960        }
 961}
 962
 963static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
 964{
 965        u16 reg;
 966
 967        if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
 968                return 0;
 969
 970        return (reg & PHY_NEG_PAUSE) != 0;
 971}
 972
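/*
 * Identify the attached PHY.  Agere parts respond at a non-default MII
 * address, so if the ID registers read back as all ones the probe is
 * retried at the Agere address; if an ET1011C is found, its
 * device-specific init is applied.
 */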
 973static int PHY_Setup(struct ql3_adapter *qdev)
 974{
 975        u16   reg1;
 976        u16   reg2;
 977        bool  agereAddrChangeNeeded = false;
 978        u32 miiAddr = 0;
 979        int err;
 980
  981        /*  Determine the PHY we are using by reading the IDs */
 982        err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
 983        if (err != 0) {
 984                netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
 985                return err;
 986        }
 987
 988        err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
 989        if (err != 0) {
 990                netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
 991                return err;
 992        }
 993
  994        /*  Check if we have an Agere PHY */
 995        if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
 996
  997                /* Determine which MII address we should be using,
  998                   based on the index of the card */
 999                if (qdev->mac_index == 0)
1000                        miiAddr = MII_AGERE_ADDR_1;
1001                else
1002                        miiAddr = MII_AGERE_ADDR_2;
1003
1004                err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
1005                if (err != 0) {
1006                        netdev_err(qdev->ndev,
1007                                   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
1008                        return err;
1009                }
1010
1011                err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
1012                if (err != 0) {
1013                        netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
1014                        return err;
1015                }
1016
1017                /*  We need to remember to initialize the Agere PHY */
1018                agereAddrChangeNeeded = true;
1019        }
1020
1021        /*  Determine the particular PHY we have on board to apply
 1022            PHY-specific initializations */
1023        qdev->phyType = getPhyType(qdev, reg1, reg2);
1024
1025        if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
1026                /* need this here so address gets changed */
1027                phyAgereSpecificInit(qdev, miiAddr);
1028        } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
1029                netdev_err(qdev->ndev, "PHY is unknown\n");
1030                return -EIO;
1031        }
1032
1033        return 0;
1034}
1035
1036/*
1037 * Caller holds hw_lock.
1038 */
1039static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1040{
1041        struct ql3xxx_port_registers __iomem *port_regs =
1042                        qdev->mem_map_registers;
1043        u32 value;
1044
1045        if (enable)
1046                value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
1047        else
1048                value = (MAC_CONFIG_REG_PE << 16);
1049
1050        if (qdev->mac_index)
1051                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1052        else
1053                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1054}
1055
1056/*
1057 * Caller holds hw_lock.
1058 */
1059static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1060{
1061        struct ql3xxx_port_registers __iomem *port_regs =
1062                        qdev->mem_map_registers;
1063        u32 value;
1064
1065        if (enable)
1066                value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
1067        else
1068                value = (MAC_CONFIG_REG_SR << 16);
1069
1070        if (qdev->mac_index)
1071                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1072        else
1073                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1074}
1075
1076/*
1077 * Caller holds hw_lock.
1078 */
1079static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1080{
1081        struct ql3xxx_port_registers __iomem *port_regs =
1082                        qdev->mem_map_registers;
1083        u32 value;
1084
1085        if (enable)
1086                value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
1087        else
1088                value = (MAC_CONFIG_REG_GM << 16);
1089
1090        if (qdev->mac_index)
1091                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1092        else
1093                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1094}
1095
1096/*
1097 * Caller holds hw_lock.
1098 */
1099static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1100{
1101        struct ql3xxx_port_registers __iomem *port_regs =
1102                        qdev->mem_map_registers;
1103        u32 value;
1104
1105        if (enable)
1106                value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
1107        else
1108                value = (MAC_CONFIG_REG_FD << 16);
1109
1110        if (qdev->mac_index)
1111                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1112        else
1113                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1114}
1115
1116/*
1117 * Caller holds hw_lock.
1118 */
1119static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1120{
1121        struct ql3xxx_port_registers __iomem *port_regs =
1122                        qdev->mem_map_registers;
1123        u32 value;
1124
1125        if (enable)
1126                value =
1127                    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
1128                     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
1129        else
1130                value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
1131
1132        if (qdev->mac_index)
1133                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1134        else
1135                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1136}
1137
1138/*
1139 * Caller holds hw_lock.
1140 */
1141static int ql_is_fiber(struct ql3_adapter *qdev)
1142{
1143        struct ql3xxx_port_registers __iomem *port_regs =
1144                        qdev->mem_map_registers;
1145        u32 bitToCheck = 0;
1146        u32 temp;
1147
1148        switch (qdev->mac_index) {
1149        case 0:
1150                bitToCheck = PORT_STATUS_SM0;
1151                break;
1152        case 1:
1153                bitToCheck = PORT_STATUS_SM1;
1154                break;
1155        }
1156
1157        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1158        return (temp & bitToCheck) != 0;
1159}
1160
1161static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1162{
1163        u16 reg;
1164        ql_mii_read_reg(qdev, 0x00, &reg);
1165        return (reg & 0x1000) != 0;
1166}
1167
1168/*
1169 * Caller holds hw_lock.
1170 */
1171static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1172{
1173        struct ql3xxx_port_registers __iomem *port_regs =
1174                        qdev->mem_map_registers;
1175        u32 bitToCheck = 0;
1176        u32 temp;
1177
1178        switch (qdev->mac_index) {
1179        case 0:
1180                bitToCheck = PORT_STATUS_AC0;
1181                break;
1182        case 1:
1183                bitToCheck = PORT_STATUS_AC1;
1184                break;
1185        }
1186
1187        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1188        if (temp & bitToCheck) {
1189                netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
1190                return 1;
1191        }
1192        netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
1193        return 0;
1194}
1195
1196/*
1197 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
1198 */
1199static int ql_is_neg_pause(struct ql3_adapter *qdev)
1200{
1201        if (ql_is_fiber(qdev))
1202                return ql_is_petbi_neg_pause(qdev);
1203        else
1204                return ql_is_phy_neg_pause(qdev);
1205}
1206
1207static int ql_auto_neg_error(struct ql3_adapter *qdev)
1208{
1209        struct ql3xxx_port_registers __iomem *port_regs =
1210                        qdev->mem_map_registers;
1211        u32 bitToCheck = 0;
1212        u32 temp;
1213
1214        switch (qdev->mac_index) {
1215        case 0:
1216                bitToCheck = PORT_STATUS_AE0;
1217                break;
1218        case 1:
1219                bitToCheck = PORT_STATUS_AE1;
1220                break;
1221        }
1222        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1223        return (temp & bitToCheck) != 0;
1224}
1225
1226static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1227{
1228        if (ql_is_fiber(qdev))
1229                return SPEED_1000;
1230        else
1231                return ql_phy_get_speed(qdev);
1232}
1233
1234static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1235{
1236        if (ql_is_fiber(qdev))
1237                return 1;
1238        else
1239                return ql_is_full_dup(qdev);
1240}
1241
1242/*
1243 * Caller holds hw_lock.
1244 */
1245static int ql_link_down_detect(struct ql3_adapter *qdev)
1246{
1247        struct ql3xxx_port_registers __iomem *port_regs =
1248                        qdev->mem_map_registers;
1249        u32 bitToCheck = 0;
1250        u32 temp;
1251
1252        switch (qdev->mac_index) {
1253        case 0:
1254                bitToCheck = ISP_CONTROL_LINK_DN_0;
1255                break;
1256        case 1:
1257                bitToCheck = ISP_CONTROL_LINK_DN_1;
1258                break;
1259        }
1260
1261        temp =
1262            ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1263        return (temp & bitToCheck) != 0;
1264}
1265
1266/*
1267 * Caller holds hw_lock.
1268 */
1269static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1270{
1271        struct ql3xxx_port_registers __iomem *port_regs =
1272                        qdev->mem_map_registers;
1273
1274        switch (qdev->mac_index) {
1275        case 0:
1276                ql_write_common_reg(qdev,
1277                                    &port_regs->CommonRegs.ispControlStatus,
1278                                    (ISP_CONTROL_LINK_DN_0) |
1279                                    (ISP_CONTROL_LINK_DN_0 << 16));
1280                break;
1281
1282        case 1:
1283                ql_write_common_reg(qdev,
1284                                    &port_regs->CommonRegs.ispControlStatus,
1285                                    (ISP_CONTROL_LINK_DN_1) |
1286                                    (ISP_CONTROL_LINK_DN_1 << 16));
1287                break;
1288
1289        default:
1290                return 1;
1291        }
1292
1293        return 0;
1294}
1295
1296/*
1297 * Caller holds hw_lock.
1298 */
1299static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
1300{
1301        struct ql3xxx_port_registers __iomem *port_regs =
1302                        qdev->mem_map_registers;
1303        u32 bitToCheck = 0;
1304        u32 temp;
1305
1306        switch (qdev->mac_index) {
1307        case 0:
1308                bitToCheck = PORT_STATUS_F1_ENABLED;
1309                break;
1310        case 1:
1311                bitToCheck = PORT_STATUS_F3_ENABLED;
1312                break;
1313        default:
1314                break;
1315        }
1316
1317        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1318        if (temp & bitToCheck) {
1319                netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1320                             "not link master\n");
1321                return 0;
1322        }
1323
1324        netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
1325        return 1;
1326}
1327
1328static void ql_phy_reset_ex(struct ql3_adapter *qdev)
1329{
1330        ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
1331                            PHYAddr[qdev->mac_index]);
1332}
1333
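/*
 * Program the 1000/100/10 and pause advertisement registers from the
 * NVRAM portConfiguration word, then restart auto-negotiation.
 */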
1334static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1335{
1336        u16 reg;
1337        u16 portConfiguration;
1338
1339        if (qdev->phyType == PHY_AGERE_ET1011C)
1340                ql_mii_write_reg(qdev, 0x13, 0x0000);
1341                                        /* turn off external loopback */
1342
1343        if (qdev->mac_index == 0)
1344                portConfiguration =
1345                        qdev->nvram_data.macCfg_port0.portConfiguration;
1346        else
1347                portConfiguration =
1348                        qdev->nvram_data.macCfg_port1.portConfiguration;
1349
 1350        /*  Some HBAs in the field are set to 0 and they need to
1351            be reinterpreted with a default value */
1352        if (portConfiguration == 0)
1353                portConfiguration = PORT_CONFIG_DEFAULT;
1354
1355        /* Set the 1000 advertisements */
1356        ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
1357                           PHYAddr[qdev->mac_index]);
1358        reg &= ~PHY_GIG_ALL_PARAMS;
1359
1360        if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
1361                if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
1362                        reg |= PHY_GIG_ADV_1000F;
1363                else
1364                        reg |= PHY_GIG_ADV_1000H;
1365        }
1366
1367        ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
1368                            PHYAddr[qdev->mac_index]);
1369
1370        /* Set the 10/100 & pause negotiation advertisements */
1371        ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
1372                           PHYAddr[qdev->mac_index]);
1373        reg &= ~PHY_NEG_ALL_PARAMS;
1374
1375        if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
1376                reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
1377
1378        if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
1379                if (portConfiguration & PORT_CONFIG_100MB_SPEED)
1380                        reg |= PHY_NEG_ADV_100F;
1381
1382                if (portConfiguration & PORT_CONFIG_10MB_SPEED)
1383                        reg |= PHY_NEG_ADV_10F;
1384        }
1385
1386        if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
1387                if (portConfiguration & PORT_CONFIG_100MB_SPEED)
1388                        reg |= PHY_NEG_ADV_100H;
1389
1390                if (portConfiguration & PORT_CONFIG_10MB_SPEED)
1391                        reg |= PHY_NEG_ADV_10H;
1392        }
1393
1394        if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
1395                reg |= 1;
1396
1397        ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
1398                            PHYAddr[qdev->mac_index]);
1399
1400        ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
1401
1402        ql_mii_write_reg_ex(qdev, CONTROL_REG,
1403                            reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
1404                            PHYAddr[qdev->mac_index]);
1405}
1406
1407static void ql_phy_init_ex(struct ql3_adapter *qdev)
1408{
1409        ql_phy_reset_ex(qdev);
1410        PHY_Setup(qdev);
1411        ql_phy_start_neg_ex(qdev);
1412}
1413
1414/*
1415 * Caller holds hw_lock.
1416 */
1417static u32 ql_get_link_state(struct ql3_adapter *qdev)
1418{
1419        struct ql3xxx_port_registers __iomem *port_regs =
1420                        qdev->mem_map_registers;
1421        u32 bitToCheck = 0;
1422        u32 temp, linkState;
1423
1424        switch (qdev->mac_index) {
1425        case 0:
1426                bitToCheck = PORT_STATUS_UP0;
1427                break;
1428        case 1:
1429                bitToCheck = PORT_STATUS_UP1;
1430                break;
1431        }
1432
1433        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1434        if (temp & bitToCheck)
1435                linkState = LS_UP;
1436        else
1437                linkState = LS_DOWN;
1438
1439        return linkState;
1440}
1441
1442static int ql_port_start(struct ql3_adapter *qdev)
1443{
1444        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1445                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1446                         2) << 7)) {
1447                netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
1448                return -1;
1449        }
1450
1451        if (ql_is_fiber(qdev)) {
1452                ql_petbi_init(qdev);
1453        } else {
1454                /* Copper port */
1455                ql_phy_init_ex(qdev);
1456        }
1457
1458        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1459        return 0;
1460}
1461
1462static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1463{
1464
1465        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1466                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1467                         2) << 7))
1468                return -1;
1469
1470        if (!ql_auto_neg_error(qdev)) {
1471                if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1472                        /* configure the MAC */
1473                        netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1474                                     "Configuring link\n");
1475                        ql_mac_cfg_soft_reset(qdev, 1);
1476                        ql_mac_cfg_gig(qdev,
1477                                       (ql_get_link_speed
1478                                        (qdev) ==
1479                                        SPEED_1000));
1480                        ql_mac_cfg_full_dup(qdev,
1481                                            ql_is_link_full_dup
1482                                            (qdev));
1483                        ql_mac_cfg_pause(qdev,
1484                                         ql_is_neg_pause
1485                                         (qdev));
1486                        ql_mac_cfg_soft_reset(qdev, 0);
1487
1488                        /* enable the MAC */
1489                        netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1490                                     "Enabling mac\n");
1491                        ql_mac_enable(qdev, 1);
1492                }
1493
1494                qdev->port_link_state = LS_UP;
1495                netif_start_queue(qdev->ndev);
1496                netif_carrier_on(qdev->ndev);
1497                netif_info(qdev, link, qdev->ndev,
1498                           "Link is up at %d Mbps, %s duplex\n",
1499                           ql_get_link_speed(qdev),
1500                           ql_is_link_full_dup(qdev) ? "full" : "half");
1501
1502        } else {        /* Remote error detected */
1503
1504                if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1505                        netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1506                                     "Remote error detected. Calling ql_port_start()\n");
1507                        /*
1508                         * ql_port_start() is shared code and needs
 1509                         * to lock the PHY on its own.
1510                         */
1511                        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1512                        if (ql_port_start(qdev))        /* Restart port */
1513                                return -1;
1514                        return 0;
1515                }
1516        }
1517        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1518        return 0;
1519}
1520
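/*
 * Link state worker.  Tracks port_link_state transitions, finishes MAC
 * configuration once auto-negotiation completes, and rearms
 * adapter_timer before returning.
 */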
1521static void ql_link_state_machine_work(struct work_struct *work)
1522{
1523        struct ql3_adapter *qdev =
1524                container_of(work, struct ql3_adapter, link_state_work.work);
1525
1526        u32 curr_link_state;
1527        unsigned long hw_flags;
1528
1529        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1530
1531        curr_link_state = ql_get_link_state(qdev);
1532
1533        if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1534                netif_info(qdev, link, qdev->ndev,
1535                           "Reset in progress, skip processing link state\n");
1536
1537                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1538
 1539                /* Restart timer on 1 second interval. */
1540                mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1541
1542                return;
1543        }
1544
1545        switch (qdev->port_link_state) {
1546        default:
1547                if (test_bit(QL_LINK_MASTER, &qdev->flags))
1548                        ql_port_start(qdev);
1549                qdev->port_link_state = LS_DOWN;
1550                /* Fall Through */
1551
1552        case LS_DOWN:
1553                if (curr_link_state == LS_UP) {
1554                        netif_info(qdev, link, qdev->ndev, "Link is up\n");
1555                        if (ql_is_auto_neg_complete(qdev))
1556                                ql_finish_auto_neg(qdev);
1557
1558                        if (qdev->port_link_state == LS_UP)
1559                                ql_link_down_detect_clear(qdev);
1560
1561                        qdev->port_link_state = LS_UP;
1562                }
1563                break;
1564
1565        case LS_UP:
1566                /*
1567                 * See if the link is currently down or went down and came
1568                 * back up
1569                 */
1570                if (curr_link_state == LS_DOWN) {
1571                        netif_info(qdev, link, qdev->ndev, "Link is down\n");
1572                        qdev->port_link_state = LS_DOWN;
1573                }
1574                if (ql_link_down_detect(qdev))
1575                        qdev->port_link_state = LS_DOWN;
1576                break;
1577        }
1578        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1579
1580        /* Restart timer on 1 second interval. */
1581        mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1582}
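
/*
 * Reader's note: a compact view of the state machine above, derived from
 * the code itself rather than any separate documentation.  While no reset
 * is active it is re-evaluated on every adapter timer tick:
 *
 *   LS_DOWN -> LS_UP    when ql_get_link_state() reports the link up; if
 *                       autonegotiation has completed, ql_finish_auto_neg()
 *                       (above) enables the MAC, starts the queue and
 *                       marks the carrier on.
 *   LS_UP   -> LS_DOWN  when ql_get_link_state() reports the link down or
 *                       ql_link_down_detect() latches a transient drop.
 *
 * Any other starting state restarts the port (only if this adapter owns
 * the PHY, i.e. QL_LINK_MASTER) and falls through to the LS_DOWN handling.
 */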
1583
1584/*
1585 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1586 */
1587static void ql_get_phy_owner(struct ql3_adapter *qdev)
1588{
1589        if (ql_this_adapter_controls_port(qdev))
1590                set_bit(QL_LINK_MASTER, &qdev->flags);
1591        else
1592                clear_bit(QL_LINK_MASTER, &qdev->flags);
1593}
1594
1595/*
1596 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1597 */
1598static void ql_init_scan_mode(struct ql3_adapter *qdev)
1599{
1600        ql_mii_enable_scan_mode(qdev);
1601
1602        if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1603                if (ql_this_adapter_controls_port(qdev))
1604                        ql_petbi_init_ex(qdev);
1605        } else {
1606                if (ql_this_adapter_controls_port(qdev))
1607                        ql_phy_init_ex(qdev);
1608        }
1609}
1610
1611/*
1612 * MII_Setup needs to be called before taking the PHY out of reset
1613 * so that the management interface clock speed can be set properly.
1614 * It would be better if we had a way to disable MDC until after the
1615 * PHY is out of reset, but we don't have that capability.
1616 */
1617static int ql_mii_setup(struct ql3_adapter *qdev)
1618{
1619        u32 reg;
1620        struct ql3xxx_port_registers __iomem *port_regs =
1621                        qdev->mem_map_registers;
1622
1623        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1624                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1625                         2) << 7))
1626                return -1;
1627
1628        if (qdev->device_id == QL3032_DEVICE_ID)
1629                ql_write_page0_reg(qdev,
1630                        &port_regs->macMIIMgmtControlReg, 0x0f00000);
1631
1632        /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1633        reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1634
1635        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1636                           reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1637
1638        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1639        return 0;
1640}
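
/*
 * Reader's note: with the 125 MHz reference clock mentioned above, the
 * DIV28 selection gives a management (MDC) clock of roughly
 * 125 MHz / 28 ~= 4.46 MHz.  This is only the arithmetic implied by the
 * comment in ql_mii_setup(); the exact limit the PHYs require is not
 * stated here.
 */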
1641
1642#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full |     \
1643                                 SUPPORTED_FIBRE |              \
1644                                 SUPPORTED_Autoneg)
1645#define SUPPORTED_TP_MODES      (SUPPORTED_10baseT_Half |       \
1646                                 SUPPORTED_10baseT_Full |       \
1647                                 SUPPORTED_100baseT_Half |      \
1648                                 SUPPORTED_100baseT_Full |      \
1649                                 SUPPORTED_1000baseT_Half |     \
1650                                 SUPPORTED_1000baseT_Full |     \
1651                                 SUPPORTED_Autoneg |            \
1652                                 SUPPORTED_TP)
1653
1654static u32 ql_supported_modes(struct ql3_adapter *qdev)
1655{
1656        if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
1657                return SUPPORTED_OPTICAL_MODES;
1658
1659        return SUPPORTED_TP_MODES;
1660}
1661
1662static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1663{
1664        int status;
1665        unsigned long hw_flags;
1666        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1667        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1668                            (QL_RESOURCE_BITS_BASE_CODE |
1669                             (qdev->mac_index) * 2) << 7)) {
1670                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1671                return 0;
1672        }
1673        status = ql_is_auto_cfg(qdev);
1674        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1675        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1676        return status;
1677}
1678
1679static u32 ql_get_speed(struct ql3_adapter *qdev)
1680{
1681        u32 status;
1682        unsigned long hw_flags;
1683        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1684        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1685                            (QL_RESOURCE_BITS_BASE_CODE |
1686                             (qdev->mac_index) * 2) << 7)) {
1687                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1688                return 0;
1689        }
1690        status = ql_get_link_speed(qdev);
1691        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1692        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1693        return status;
1694}
1695
1696static int ql_get_full_dup(struct ql3_adapter *qdev)
1697{
1698        int status;
1699        unsigned long hw_flags;
1700        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1701        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1702                            (QL_RESOURCE_BITS_BASE_CODE |
1703                             (qdev->mac_index) * 2) << 7)) {
1704                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1705                return 0;
1706        }
1707        status = ql_is_link_full_dup(qdev);
1708        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1709        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1710        return status;
1711}
1712
1713static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1714{
1715        struct ql3_adapter *qdev = netdev_priv(ndev);
1716
1717        ecmd->transceiver = XCVR_INTERNAL;
1718        ecmd->supported = ql_supported_modes(qdev);
1719
1720        if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1721                ecmd->port = PORT_FIBRE;
1722        } else {
1723                ecmd->port = PORT_TP;
1724                ecmd->phy_address = qdev->PHYAddr;
1725        }
1726        ecmd->advertising = ql_supported_modes(qdev);
1727        ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1728        ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
1729        ecmd->duplex = ql_get_full_dup(qdev);
1730        return 0;
1731}
1732
1733static void ql_get_drvinfo(struct net_device *ndev,
1734                           struct ethtool_drvinfo *drvinfo)
1735{
1736        struct ql3_adapter *qdev = netdev_priv(ndev);
1737        strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
1738        strlcpy(drvinfo->version, ql3xxx_driver_version,
1739                sizeof(drvinfo->version));
1740        strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
1741                sizeof(drvinfo->bus_info));
1742}
1743
1744static u32 ql_get_msglevel(struct net_device *ndev)
1745{
1746        struct ql3_adapter *qdev = netdev_priv(ndev);
1747        return qdev->msg_enable;
1748}
1749
1750static void ql_set_msglevel(struct net_device *ndev, u32 value)
1751{
1752        struct ql3_adapter *qdev = netdev_priv(ndev);
1753        qdev->msg_enable = value;
1754}
1755
1756static void ql_get_pauseparam(struct net_device *ndev,
1757                              struct ethtool_pauseparam *pause)
1758{
1759        struct ql3_adapter *qdev = netdev_priv(ndev);
1760        struct ql3xxx_port_registers __iomem *port_regs =
1761                qdev->mem_map_registers;
1762
1763        u32 reg;
1764        if (qdev->mac_index == 0)
1765                reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1766        else
1767                reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1768
1769        pause->autoneg  = ql_get_auto_cfg_status(qdev);
1770        pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
1771        pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
1772}
1773
1774static const struct ethtool_ops ql3xxx_ethtool_ops = {
1775        .get_settings = ql_get_settings,
1776        .get_drvinfo = ql_get_drvinfo,
1777        .get_link = ethtool_op_get_link,
1778        .get_msglevel = ql_get_msglevel,
1779        .set_msglevel = ql_set_msglevel,
1780        .get_pauseparam = ql_get_pauseparam,
1781};
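
/*
 * Reader's note: these callbacks back the standard ethtool interfaces --
 * e.g. plain `ethtool ethN` uses .get_settings, `ethtool -i ethN` uses
 * .get_drvinfo, `ethtool -a ethN` uses .get_pauseparam, and
 * `ethtool -s ethN msglvl N` uses .set_msglevel.  (The interface name is
 * only an example, not something this driver assigns.)
 */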
1782
1783static int ql_populate_free_queue(struct ql3_adapter *qdev)
1784{
1785        struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1786        dma_addr_t map;
1787        int err;
1788
1789        while (lrg_buf_cb) {
1790                if (!lrg_buf_cb->skb) {
1791                        lrg_buf_cb->skb =
1792                                netdev_alloc_skb(qdev->ndev,
1793                                                 qdev->lrg_buffer_len);
1794                        if (unlikely(!lrg_buf_cb->skb)) {
1795                                netdev_printk(KERN_DEBUG, qdev->ndev,
1796                                              "Failed netdev_alloc_skb()\n");
1797                                break;
1798                        } else {
1799                                /*
1800                                 * We save some space to copy the ethhdr from
1801                                 * first buffer
1802                                 */
1803                                skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1804                                map = pci_map_single(qdev->pdev,
1805                                                     lrg_buf_cb->skb->data,
1806                                                     qdev->lrg_buffer_len -
1807                                                     QL_HEADER_SPACE,
1808                                                     PCI_DMA_FROMDEVICE);
1809
1810                                err = pci_dma_mapping_error(qdev->pdev, map);
1811                                if (err) {
1812                                        netdev_err(qdev->ndev,
1813                                                   "PCI mapping failed with error: %d\n",
1814                                                   err);
1815                                        dev_kfree_skb(lrg_buf_cb->skb);
1816                                        lrg_buf_cb->skb = NULL;
1817                                        break;
1818                                }
1819
1820
1821                                lrg_buf_cb->buf_phy_addr_low =
1822                                        cpu_to_le32(LS_64BITS(map));
1823                                lrg_buf_cb->buf_phy_addr_high =
1824                                        cpu_to_le32(MS_64BITS(map));
1825                                dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1826                                dma_unmap_len_set(lrg_buf_cb, maplen,
1827                                                  qdev->lrg_buffer_len -
1828                                                  QL_HEADER_SPACE);
1829                                --qdev->lrg_buf_skb_check;
1830                                if (!qdev->lrg_buf_skb_check)
1831                                        return 1;
1832                        }
1833                }
1834                lrg_buf_cb = lrg_buf_cb->next;
1835        }
1836        return 0;
1837}
1838
1839/*
1840 * Caller holds hw_lock.
1841 */
1842static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1843{
1844        struct ql3xxx_port_registers __iomem *port_regs =
1845                qdev->mem_map_registers;
1846
1847        if (qdev->small_buf_release_cnt >= 16) {
1848                while (qdev->small_buf_release_cnt >= 16) {
1849                        qdev->small_buf_q_producer_index++;
1850
1851                        if (qdev->small_buf_q_producer_index ==
1852                            NUM_SBUFQ_ENTRIES)
1853                                qdev->small_buf_q_producer_index = 0;
1854                        qdev->small_buf_release_cnt -= 8;
1855                }
1856                wmb();
1857                writel(qdev->small_buf_q_producer_index,
1858                        &port_regs->CommonRegs.rxSmallQProducerIndex);
1859        }
1860}
1861
1862/*
1863 * Caller holds hw_lock.
1864 */
1865static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1866{
1867        struct bufq_addr_element *lrg_buf_q_ele;
1868        int i;
1869        struct ql_rcv_buf_cb *lrg_buf_cb;
1870        struct ql3xxx_port_registers __iomem *port_regs =
1871                qdev->mem_map_registers;
1872
1873        if ((qdev->lrg_buf_free_count >= 8) &&
1874            (qdev->lrg_buf_release_cnt >= 16)) {
1875
1876                if (qdev->lrg_buf_skb_check)
1877                        if (!ql_populate_free_queue(qdev))
1878                                return;
1879
1880                lrg_buf_q_ele = qdev->lrg_buf_next_free;
1881
1882                while ((qdev->lrg_buf_release_cnt >= 16) &&
1883                       (qdev->lrg_buf_free_count >= 8)) {
1884
1885                        for (i = 0; i < 8; i++) {
1886                                lrg_buf_cb =
1887                                    ql_get_from_lrg_buf_free_list(qdev);
1888                                lrg_buf_q_ele->addr_high =
1889                                    lrg_buf_cb->buf_phy_addr_high;
1890                                lrg_buf_q_ele->addr_low =
1891                                    lrg_buf_cb->buf_phy_addr_low;
1892                                lrg_buf_q_ele++;
1893
1894                                qdev->lrg_buf_release_cnt--;
1895                        }
1896
1897                        qdev->lrg_buf_q_producer_index++;
1898
1899                        if (qdev->lrg_buf_q_producer_index ==
1900                            qdev->num_lbufq_entries)
1901                                qdev->lrg_buf_q_producer_index = 0;
1902
1903                        if (qdev->lrg_buf_q_producer_index ==
1904                            (qdev->num_lbufq_entries - 1)) {
1905                                lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1906                        }
1907                }
1908                wmb();
1909                qdev->lrg_buf_next_free = lrg_buf_q_ele;
1910                writel(qdev->lrg_buf_q_producer_index,
1911                        &port_regs->CommonRegs.rxLargeQProducerIndex);
1912        }
1913}
1914
1915static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1916                                   struct ob_mac_iocb_rsp *mac_rsp)
1917{
1918        struct ql_tx_buf_cb *tx_cb;
1919        int i;
1920
1921        if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1922                netdev_warn(qdev->ndev,
1923                            "Frame too short but it was padded and sent\n");
1924        }
1925
1926        tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1927
1928        /*  Check the transmit response flags for any errors */
1929        if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1930                netdev_err(qdev->ndev,
1931                           "Frame too short to be legal, frame not sent\n");
1932
1933                qdev->ndev->stats.tx_errors++;
1934                goto frame_not_sent;
1935        }
1936
1937        if (tx_cb->seg_count == 0) {
1938                netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1939                           mac_rsp->transaction_id);
1940
1941                qdev->ndev->stats.tx_errors++;
1942                goto invalid_seg_count;
1943        }
1944
1945        pci_unmap_single(qdev->pdev,
1946                         dma_unmap_addr(&tx_cb->map[0], mapaddr),
1947                         dma_unmap_len(&tx_cb->map[0], maplen),
1948                         PCI_DMA_TODEVICE);
1949        tx_cb->seg_count--;
1950        if (tx_cb->seg_count) {
1951                for (i = 1; i < tx_cb->seg_count; i++) {
1952                        pci_unmap_page(qdev->pdev,
1953                                       dma_unmap_addr(&tx_cb->map[i],
1954                                                      mapaddr),
1955                                       dma_unmap_len(&tx_cb->map[i], maplen),
1956                                       PCI_DMA_TODEVICE);
1957                }
1958        }
1959        qdev->ndev->stats.tx_packets++;
1960        qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
1961
1962frame_not_sent:
1963        dev_kfree_skb_irq(tx_cb->skb);
1964        tx_cb->skb = NULL;
1965
1966invalid_seg_count:
1967        atomic_inc(&qdev->tx_count);
1968}
1969
1970static void ql_get_sbuf(struct ql3_adapter *qdev)
1971{
1972        if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1973                qdev->small_buf_index = 0;
1974        qdev->small_buf_release_cnt++;
1975}
1976
1977static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
1978{
1979        struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
1980        lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
1981        qdev->lrg_buf_release_cnt++;
1982        if (++qdev->lrg_buf_index == qdev->num_large_buffers)
1983                qdev->lrg_buf_index = 0;
1984        return lrg_buf_cb;
1985}
1986
1987/*
1988 * The difference between 3022 and 3032 for inbound completions:
1989 * 3022 uses two buffers per completion.  The first buffer contains
1990 * (some) header info, the second the remainder of the headers plus
1991 * the data.  For this chip we reserve some space at the top of the
1992 * receive buffer so that the header info in buffer one can be
1993 * prepended to buffer two.  Buffer two is then sent up while
1994 * buffer one is returned to the hardware to be reused.
1995 * 3032 receives all of its data and headers in one buffer for a
1996 * simpler process.  3032 also supports checksum verification as
1997 * can be seen in ql_process_macip_rx_intr().
1998 */
1999static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2000                                   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
2001{
2002        struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2003        struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2004        struct sk_buff *skb;
2005        u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
2006
2007        /*
2008         * Get the inbound address list (small buffer).
2009         */
2010        ql_get_sbuf(qdev);
2011
2012        if (qdev->device_id == QL3022_DEVICE_ID)
2013                lrg_buf_cb1 = ql_get_lbuf(qdev);
2014
2015        /* start of second buffer */
2016        lrg_buf_cb2 = ql_get_lbuf(qdev);
2017        skb = lrg_buf_cb2->skb;
2018
2019        qdev->ndev->stats.rx_packets++;
2020        qdev->ndev->stats.rx_bytes += length;
2021
2022        skb_put(skb, length);
2023        pci_unmap_single(qdev->pdev,
2024                         dma_unmap_addr(lrg_buf_cb2, mapaddr),
2025                         dma_unmap_len(lrg_buf_cb2, maplen),
2026                         PCI_DMA_FROMDEVICE);
2027        prefetch(skb->data);
2028        skb_checksum_none_assert(skb);
2029        skb->protocol = eth_type_trans(skb, qdev->ndev);
2030
2031        netif_receive_skb(skb);
2032        lrg_buf_cb2->skb = NULL;
2033
2034        if (qdev->device_id == QL3022_DEVICE_ID)
2035                ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2036        ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2037}
2038
2039static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2040                                     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
2041{
2042        struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2043        struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2044        struct sk_buff *skb1 = NULL, *skb2;
2045        struct net_device *ndev = qdev->ndev;
2046        u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
2047        u16 size = 0;
2048
2049        /*
2050         * Get the inbound address list (small buffer).
2051         */
2052
2053        ql_get_sbuf(qdev);
2054
2055        if (qdev->device_id == QL3022_DEVICE_ID) {
2056                /* start of first buffer on 3022 */
2057                lrg_buf_cb1 = ql_get_lbuf(qdev);
2058                skb1 = lrg_buf_cb1->skb;
2059                size = ETH_HLEN;
2060                if (*((u16 *) skb1->data) != 0xFFFF)
2061                        size += VLAN_ETH_HLEN - ETH_HLEN;
2062        }
2063
2064        /* start of second buffer */
2065        lrg_buf_cb2 = ql_get_lbuf(qdev);
2066        skb2 = lrg_buf_cb2->skb;
2067
2068        skb_put(skb2, length);  /* Just the second buffer length here. */
2069        pci_unmap_single(qdev->pdev,
2070                         dma_unmap_addr(lrg_buf_cb2, mapaddr),
2071                         dma_unmap_len(lrg_buf_cb2, maplen),
2072                         PCI_DMA_FROMDEVICE);
2073        prefetch(skb2->data);
2074
2075        skb_checksum_none_assert(skb2);
2076        if (qdev->device_id == QL3022_DEVICE_ID) {
2077                /*
2078                 * Copy the ethhdr from first buffer to second. This
2079                 * is necessary for 3022 IP completions.
2080                 */
2081                skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
2082                                                 skb_push(skb2, size), size);
2083        } else {
2084                u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
2085                if (checksum &
2086                        (IB_IP_IOCB_RSP_3032_ICE |
2087                         IB_IP_IOCB_RSP_3032_CE)) {
2088                        netdev_err(ndev,
2089                                   "%s: Bad checksum for this %s packet, checksum = %x\n",
2090                                   __func__,
2091                                   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
2092                                    "TCP" : "UDP"), checksum);
2093                } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2094                                (checksum & IB_IP_IOCB_RSP_3032_UDP &&
2095                                !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
2096                        skb2->ip_summed = CHECKSUM_UNNECESSARY;
2097                }
2098        }
2099        skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2100
2101        netif_receive_skb(skb2);
2102        ndev->stats.rx_packets++;
2103        ndev->stats.rx_bytes += length;
2104        lrg_buf_cb2->skb = NULL;
2105
2106        if (qdev->device_id == QL3022_DEVICE_ID)
2107                ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2108        ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2109}
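
/*
 * Reader's note, a worked example of the 3022 two-buffer path above
 * (values assume an untagged Ethernet frame): buffer one carries the
 * Ethernet header, so size = ETH_HLEN = 14 bytes.  skb_push(skb2, 14)
 * opens 14 bytes of the headroom reserved via QL_HEADER_SPACE at
 * allocation time, and the copy moves the header from skb1 (starting
 * VLAN_ID_LEN bytes in) ahead of the payload in skb2, which is the single
 * skb handed to the stack.  When the first 16 bits of buffer one are not
 * 0xFFFF the frame is treated as VLAN-tagged and size grows to
 * VLAN_ETH_HLEN = 18 bytes.
 */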
2110
2111static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2112                          int *tx_cleaned, int *rx_cleaned, int work_to_do)
2113{
2114        struct net_rsp_iocb *net_rsp;
2115        struct net_device *ndev = qdev->ndev;
2116        int work_done = 0;
2117
2118        /* While there are entries in the completion queue. */
2119        while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2120                qdev->rsp_consumer_index) && (work_done < work_to_do)) {
2121
2122                net_rsp = qdev->rsp_current;
2123                rmb();
2124                /*
2125                 * Fix 4032 chip's undocumented "feature" where bit-8 is set
2126                 * if the inbound completion is for a VLAN.
2127                 */
2128                if (qdev->device_id == QL3032_DEVICE_ID)
2129                        net_rsp->opcode &= 0x7f;
2130                switch (net_rsp->opcode) {
2131
2132                case OPCODE_OB_MAC_IOCB_FN0:
2133                case OPCODE_OB_MAC_IOCB_FN2:
2134                        ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2135                                               net_rsp);
2136                        (*tx_cleaned)++;
2137                        break;
2138
2139                case OPCODE_IB_MAC_IOCB:
2140                case OPCODE_IB_3032_MAC_IOCB:
2141                        ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2142                                               net_rsp);
2143                        (*rx_cleaned)++;
2144                        break;
2145
2146                case OPCODE_IB_IP_IOCB:
2147                case OPCODE_IB_3032_IP_IOCB:
2148                        ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2149                                                 net_rsp);
2150                        (*rx_cleaned)++;
2151                        break;
2152                default: {
2153                        u32 *tmp = (u32 *)net_rsp;
2154                        netdev_err(ndev,
2155                                   "Hit default case, not handled!\n"
2156                                   "    dropping the packet, opcode = %x\n"
2157                                   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2158                                   net_rsp->opcode,
2159                                   (unsigned long int)tmp[0],
2160                                   (unsigned long int)tmp[1],
2161                                   (unsigned long int)tmp[2],
2162                                   (unsigned long int)tmp[3]);
2163                }
2164                }
2165
2166                qdev->rsp_consumer_index++;
2167
2168                if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2169                        qdev->rsp_consumer_index = 0;
2170                        qdev->rsp_current = qdev->rsp_q_virt_addr;
2171                } else {
2172                        qdev->rsp_current++;
2173                }
2174
2175                work_done = *tx_cleaned + *rx_cleaned;
2176        }
2177
2178        return work_done;
2179}
2180
2181static int ql_poll(struct napi_struct *napi, int budget)
2182{
2183        struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2184        int rx_cleaned = 0, tx_cleaned = 0;
2185        unsigned long hw_flags;
2186        struct ql3xxx_port_registers __iomem *port_regs =
2187                qdev->mem_map_registers;
2188
2189        ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2190
2191        if (tx_cleaned + rx_cleaned != budget) {
2192                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2193                __napi_complete(napi);
2194                ql_update_small_bufq_prod_index(qdev);
2195                ql_update_lrg_bufq_prod_index(qdev);
2196                writel(qdev->rsp_consumer_index,
2197                            &port_regs->CommonRegs.rspQConsumerIndex);
2198                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2199
2200                ql_enable_interrupts(qdev);
2201        }
2202        return tx_cleaned + rx_cleaned;
2203}
2204
2205static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2206{
2207
2208        struct net_device *ndev = dev_id;
2209        struct ql3_adapter *qdev = netdev_priv(ndev);
2210        struct ql3xxx_port_registers __iomem *port_regs =
2211                qdev->mem_map_registers;
2212        u32 value;
2213        int handled = 1;
2214        u32 var;
2215
2216        value = ql_read_common_reg_l(qdev,
2217                                     &port_regs->CommonRegs.ispControlStatus);
2218
2219        if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2220                spin_lock(&qdev->adapter_lock);
2221                netif_stop_queue(qdev->ndev);
2222                netif_carrier_off(qdev->ndev);
2223                ql_disable_interrupts(qdev);
2224                qdev->port_link_state = LS_DOWN;
2225                set_bit(QL_RESET_ACTIVE, &qdev->flags);
2226
2227                if (value & ISP_CONTROL_FE) {
2228                        /*
2229                         * Chip Fatal Error.
2230                         */
2231                        var =
2232                            ql_read_page0_reg_l(qdev,
2233                                              &port_regs->PortFatalErrStatus);
2234                        netdev_warn(ndev,
2235                                    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2236                                    var);
2237                        set_bit(QL_RESET_START, &qdev->flags);
2238                } else {
2239                        /*
2240                         * Soft Reset Requested.
2241                         */
2242                        set_bit(QL_RESET_PER_SCSI, &qdev->flags);
2243                        netdev_err(ndev,
2244                                   "Another function issued a reset to the chip. ISR value = %x\n",
2245                                   value);
2246                }
2247                queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2248                spin_unlock(&qdev->adapter_lock);
2249        } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2250                ql_disable_interrupts(qdev);
2251                if (likely(napi_schedule_prep(&qdev->napi)))
2252                        __napi_schedule(&qdev->napi);
2253        } else
2254                return IRQ_NONE;
2255
2256        return IRQ_RETVAL(handled);
2257}
2258
2259/*
2260 * Get the total number of segments needed for the given number of fragments.
2261 * This is necessary because outbound address lists (OAL) will be used when
2262 * more than two frags are given.  Each address list has 5 addr/len pairs.
2263 * The 5th pair in each OAL is used to point to the next OAL if more frags
2264 * are coming.  That is why the frags:segment count ratio is not linear.
2265 */
2266static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2267{
2268        if (qdev->device_id == QL3022_DEVICE_ID)
2269                return 1;
2270
2271        if (frags <= 2)
2272                return frags + 1;
2273        else if (frags <= 6)
2274                return frags + 2;
2275        else if (frags <= 10)
2276                return frags + 3;
2277        else if (frags <= 14)
2278                return frags + 4;
2279        else if (frags <= 18)
2280                return frags + 5;
2281        return -1;
2282}
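
/*
 * Reader's note, a worked example of the mapping above (3032 only): with
 * frags = 5 the data occupies 1 + 5 = 6 addr/len pairs (linear area plus
 * five page fragments).  Two data pairs fit in the IOCB, the IOCB's third
 * pair chains to one OAL, and the remaining four data pairs land in that
 * OAL -- 6 data pairs plus 1 chain pair, matching the frags + 2 = 7
 * returned for frags <= 6.
 */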
2283
2284static void ql_hw_csum_setup(const struct sk_buff *skb,
2285                             struct ob_mac_iocb_req *mac_iocb_ptr)
2286{
2287        const struct iphdr *ip = ip_hdr(skb);
2288
2289        mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2290        mac_iocb_ptr->ip_hdr_len = ip->ihl;
2291
2292        if (ip->protocol == IPPROTO_TCP) {
2293                mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2294                        OB_3032MAC_IOCB_REQ_IC;
2295        } else {
2296                mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2297                        OB_3032MAC_IOCB_REQ_IC;
2298        }
2299
2300}
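
/*
 * Reader's note, an example of the offsets set above (assuming an untagged
 * IPv4/TCP frame with no IP options): ip_hdr_off = 14, since the Ethernet
 * header precedes the IP header, and ip_hdr_len = 5, since iphdr.ihl
 * counts the header in 32-bit words (5 * 4 = 20 bytes).  flags1 then
 * requests TCP and IP checksum insertion
 * (OB_3032MAC_IOCB_REQ_TC | OB_3032MAC_IOCB_REQ_IC).
 */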
2301
2302/*
2303 * Map the buffers for this transmit.
2304 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2305 */
2306static int ql_send_map(struct ql3_adapter *qdev,
2307                                struct ob_mac_iocb_req *mac_iocb_ptr,
2308                                struct ql_tx_buf_cb *tx_cb,
2309                                struct sk_buff *skb)
2310{
2311        struct oal *oal;
2312        struct oal_entry *oal_entry;
2313        int len = skb_headlen(skb);
2314        dma_addr_t map;
2315        int err;
2316        int completed_segs, i;
2317        int seg_cnt, seg = 0;
2318        int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2319
2320        seg_cnt = tx_cb->seg_count;
2321        /*
2322         * Map the skb buffer first.
2323         */
2324        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2325
2326        err = pci_dma_mapping_error(qdev->pdev, map);
2327        if (err) {
2328                netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2329                           err);
2330
2331                return NETDEV_TX_BUSY;
2332        }
2333
2334        oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2335        oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2336        oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2337        oal_entry->len = cpu_to_le32(len);
2338        dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2339        dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
2340        seg++;
2341
2342        if (seg_cnt == 1) {
2343                /* Terminate the last segment. */
2344                oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2345                return NETDEV_TX_OK;
2346        }
2347        oal = tx_cb->oal;
2348        for (completed_segs = 0;
2349             completed_segs < frag_cnt;
2350             completed_segs++, seg++) {
2351                skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2352                oal_entry++;
2353                /*
2354                 * Check for continuation requirements.
2355                 * It's strange but necessary.
2356                 * Continuation entry points to outbound address list.
2357                 */
2358                if ((seg == 2 && seg_cnt > 3) ||
2359                    (seg == 7 && seg_cnt > 8) ||
2360                    (seg == 12 && seg_cnt > 13) ||
2361                    (seg == 17 && seg_cnt > 18)) {
2362                        map = pci_map_single(qdev->pdev, oal,
2363                                             sizeof(struct oal),
2364                                             PCI_DMA_TODEVICE);
2365
2366                        err = pci_dma_mapping_error(qdev->pdev, map);
2367                        if (err) {
2368                                netdev_err(qdev->ndev,
2369                                           "PCI mapping outbound address list with error: %d\n",
2370                                           err);
2371                                goto map_error;
2372                        }
2373
2374                        oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2375                        oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2376                        oal_entry->len = cpu_to_le32(sizeof(struct oal) |
2377                                                     OAL_CONT_ENTRY);
2378                        dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2379                        dma_unmap_len_set(&tx_cb->map[seg], maplen,
2380                                          sizeof(struct oal));
2381                        oal_entry = (struct oal_entry *)oal;
2382                        oal++;
2383                        seg++;
2384                }
2385
2386                map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
2387                                       DMA_TO_DEVICE);
2388
2389                err = dma_mapping_error(&qdev->pdev->dev, map);
2390                if (err) {
2391                        netdev_err(qdev->ndev,
2392                                   "PCI mapping frags failed with error: %d\n",
2393                                   err);
2394                        goto map_error;
2395                }
2396
2397                oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2398                oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2399                oal_entry->len = cpu_to_le32(skb_frag_size(frag));
2400                dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2401                dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
2402        }
2403        /* Terminate the last segment. */
2404        oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2405        return NETDEV_TX_OK;
2406
2407map_error:
2408        /* A PCI mapping failed, so we must back out.  Traverse the OALs
2409         * and the associated pages that have already been mapped and
2410         * unmap them to clean up properly.
2411         */
2412
2413        seg = 1;
2414        oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2415        oal = tx_cb->oal;
2416        for (i = 0; i < completed_segs; i++, seg++) {
2417                oal_entry++;
2418
2419                /*
2420                 * Check for continuation requirements.
2421                 * It's strange but necessary.
2422                 */
2423
2424                if ((seg == 2 && seg_cnt > 3) ||
2425                    (seg == 7 && seg_cnt > 8) ||
2426                    (seg == 12 && seg_cnt > 13) ||
2427                    (seg == 17 && seg_cnt > 18)) {
2428                        pci_unmap_single(qdev->pdev,
2429                                dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2430                                dma_unmap_len(&tx_cb->map[seg], maplen),
2431                                 PCI_DMA_TODEVICE);
2432                        oal++;
2433                        seg++;
2434                }
2435
2436                pci_unmap_page(qdev->pdev,
2437                               dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2438                               dma_unmap_len(&tx_cb->map[seg], maplen),
2439                               PCI_DMA_TODEVICE);
2440        }
2441
2442        pci_unmap_single(qdev->pdev,
2443                         dma_unmap_addr(&tx_cb->map[0], mapaddr),
2444                         dma_unmap_len(&tx_cb->map[0], maplen),
2445                         PCI_DMA_TODEVICE);
2446
2447        return NETDEV_TX_BUSY;
2448
2449}
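
/*
 * Reader's note, a worked example of the continuation checks above
 * (assuming a 3032 skb with 4 page fragments, so seg_cnt = 6): seg 0 is
 * the linear area and seg 1 is frag 0, both placed in the IOCB.  On the
 * next fragment seg == 2 and seg_cnt > 3, so the OAL itself is DMA-mapped
 * and its address written into the IOCB's third pair; frags 1-3 then fill
 * the first three OAL entries as segs 3-5, and the final entry is flagged
 * with OAL_LAST_ENTRY.
 */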
2450
2451/*
2452 * The difference between 3022 and 3032 sends:
2453 * 3022 only supports a simple single segment transmission.
2454 * 3032 supports checksumming and scatter/gather lists (fragments).
2455 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2456 * in the IOCB plus a chain of outbound address lists (OAL) that
2457 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
2458 * will be used to point to an OAL when more ALP entries are required.
2459 * The IOCB is always the top of the chain followed by one or more
2460 * OALs (when necessary).
2461 */
2462static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2463                               struct net_device *ndev)
2464{
2465        struct ql3_adapter *qdev = netdev_priv(ndev);
2466        struct ql3xxx_port_registers __iomem *port_regs =
2467                        qdev->mem_map_registers;
2468        struct ql_tx_buf_cb *tx_cb;
2469        u32 tot_len = skb->len;
2470        struct ob_mac_iocb_req *mac_iocb_ptr;
2471
2472        if (unlikely(atomic_read(&qdev->tx_count) < 2))
2473                return NETDEV_TX_BUSY;
2474
2475        tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2476        tx_cb->seg_count = ql_get_seg_count(qdev,
2477                                             skb_shinfo(skb)->nr_frags);
2478        if (tx_cb->seg_count == -1) {
2479                netdev_err(ndev, "%s: invalid segment count!\n", __func__);
2480                return NETDEV_TX_OK;
2481        }
2482
2483        mac_iocb_ptr = tx_cb->queue_entry;
2484        memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2485        mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2486        mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2487        mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2488        mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2489        mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2490        tx_cb->skb = skb;
2491        if (qdev->device_id == QL3032_DEVICE_ID &&
2492            skb->ip_summed == CHECKSUM_PARTIAL)
2493                ql_hw_csum_setup(skb, mac_iocb_ptr);
2494
2495        if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2496                netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
2497                return NETDEV_TX_BUSY;
2498        }
2499
2500        wmb();
2501        qdev->req_producer_index++;
2502        if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2503                qdev->req_producer_index = 0;
2504        wmb();
2505        ql_write_common_reg_l(qdev,
2506                            &port_regs->CommonRegs.reqQProducerIndex,
2507                            qdev->req_producer_index);
2508
2509        netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2510                     "tx queued, slot %d, len %d\n",
2511                     qdev->req_producer_index, skb->len);
2512
2513        atomic_dec(&qdev->tx_count);
2514        return NETDEV_TX_OK;
2515}
2516
2517static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2518{
2519        qdev->req_q_size =
2520            (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2521
2522        qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2523
2524        /* The barrier is required to ensure that the request and
2525         * response queue address writes reach the registers.
2526         */
2527        wmb();
2528
2529        qdev->req_q_virt_addr =
2530            pci_alloc_consistent(qdev->pdev,
2531                                 (size_t) qdev->req_q_size,
2532                                 &qdev->req_q_phy_addr);
2533
2534        if ((qdev->req_q_virt_addr == NULL) ||
2535            LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2536                netdev_err(qdev->ndev, "reqQ failed\n");
2537                return -ENOMEM;
2538        }
2539
2540        qdev->rsp_q_virt_addr =
2541            pci_alloc_consistent(qdev->pdev,
2542                                 (size_t) qdev->rsp_q_size,
2543                                 &qdev->rsp_q_phy_addr);
2544
2545        if ((qdev->rsp_q_virt_addr == NULL) ||
2546            LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2547                netdev_err(qdev->ndev, "rspQ allocation failed\n");
2548                pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2549                                    qdev->req_q_virt_addr,
2550                                    qdev->req_q_phy_addr);
2551                return -ENOMEM;
2552        }
2553
2554        set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2555
2556        return 0;
2557}
2558
2559static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2560{
2561        if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2562                netdev_info(qdev->ndev, "Already done\n");
2563                return;
2564        }
2565
2566        pci_free_consistent(qdev->pdev,
2567                            qdev->req_q_size,
2568                            qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2569
2570        qdev->req_q_virt_addr = NULL;
2571
2572        pci_free_consistent(qdev->pdev,
2573                            qdev->rsp_q_size,
2574                            qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2575
2576        qdev->rsp_q_virt_addr = NULL;
2577
2578        clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2579}
2580
2581static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2582{
2583        /* Create Large Buffer Queue */
2584        qdev->lrg_buf_q_size =
2585                qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2586        if (qdev->lrg_buf_q_size < PAGE_SIZE)
2587                qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2588        else
2589                qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2590
2591        qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
2592                                      sizeof(struct ql_rcv_buf_cb),
2593                                      GFP_KERNEL);
2594        if (qdev->lrg_buf == NULL)
2595                return -ENOMEM;
2596
2597        qdev->lrg_buf_q_alloc_virt_addr =
2598                pci_alloc_consistent(qdev->pdev,
2599                                     qdev->lrg_buf_q_alloc_size,
2600                                     &qdev->lrg_buf_q_alloc_phy_addr);
2601
2602        if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2603                netdev_err(qdev->ndev, "lBufQ failed\n");
2604                return -ENOMEM;
2605        }
2606        qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2607        qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2608
2609        /* Create Small Buffer Queue */
2610        qdev->small_buf_q_size =
2611                NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2612        if (qdev->small_buf_q_size < PAGE_SIZE)
2613                qdev->small_buf_q_alloc_size = PAGE_SIZE;
2614        else
2615                qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2616
2617        qdev->small_buf_q_alloc_virt_addr =
2618                pci_alloc_consistent(qdev->pdev,
2619                                     qdev->small_buf_q_alloc_size,
2620                                     &qdev->small_buf_q_alloc_phy_addr);
2621
2622        if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2623                netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2624                pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2625                                    qdev->lrg_buf_q_alloc_virt_addr,
2626                                    qdev->lrg_buf_q_alloc_phy_addr);
2627                return -ENOMEM;
2628        }
2629
2630        qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2631        qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2632        set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2633        return 0;
2634}
2635
2636static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2637{
2638        if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2639                netdev_info(qdev->ndev, "Already done\n");
2640                return;
2641        }
2642        kfree(qdev->lrg_buf);
2643        pci_free_consistent(qdev->pdev,
2644                            qdev->lrg_buf_q_alloc_size,
2645                            qdev->lrg_buf_q_alloc_virt_addr,
2646                            qdev->lrg_buf_q_alloc_phy_addr);
2647
2648        qdev->lrg_buf_q_virt_addr = NULL;
2649
2650        pci_free_consistent(qdev->pdev,
2651                            qdev->small_buf_q_alloc_size,
2652                            qdev->small_buf_q_alloc_virt_addr,
2653                            qdev->small_buf_q_alloc_phy_addr);
2654
2655        qdev->small_buf_q_virt_addr = NULL;
2656
2657        clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2658}
2659
2660static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2661{
2662        int i;
2663        struct bufq_addr_element *small_buf_q_entry;
2664
2665        /* Currently we allocate one chunk of memory and use it for small buffers */
2666        qdev->small_buf_total_size =
2667                (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2668                 QL_SMALL_BUFFER_SIZE);
2669
2670        qdev->small_buf_virt_addr =
2671                pci_alloc_consistent(qdev->pdev,
2672                                     qdev->small_buf_total_size,
2673                                     &qdev->small_buf_phy_addr);
2674
2675        if (qdev->small_buf_virt_addr == NULL) {
2676                netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2677                return -ENOMEM;
2678        }
2679
2680        qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2681        qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2682
2683        small_buf_q_entry = qdev->small_buf_q_virt_addr;
2684
2685        /* Initialize the small buffer queue. */
2686        for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2687                small_buf_q_entry->addr_high =
2688                    cpu_to_le32(qdev->small_buf_phy_addr_high);
2689                small_buf_q_entry->addr_low =
2690                    cpu_to_le32(qdev->small_buf_phy_addr_low +
2691                                (i * QL_SMALL_BUFFER_SIZE));
2692                small_buf_q_entry++;
2693        }
2694        qdev->small_buf_index = 0;
2695        set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2696        return 0;
2697}
2698
2699static void ql_free_small_buffers(struct ql3_adapter *qdev)
2700{
2701        if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2702                netdev_info(qdev->ndev, "Already done\n");
2703                return;
2704        }
2705        if (qdev->small_buf_virt_addr != NULL) {
2706                pci_free_consistent(qdev->pdev,
2707                                    qdev->small_buf_total_size,
2708                                    qdev->small_buf_virt_addr,
2709                                    qdev->small_buf_phy_addr);
2710
2711                qdev->small_buf_virt_addr = NULL;
2712        }
2713}
2714
2715static void ql_free_large_buffers(struct ql3_adapter *qdev)
2716{
2717        int i = 0;
2718        struct ql_rcv_buf_cb *lrg_buf_cb;
2719
2720        for (i = 0; i < qdev->num_large_buffers; i++) {
2721                lrg_buf_cb = &qdev->lrg_buf[i];
2722                if (lrg_buf_cb->skb) {
2723                        dev_kfree_skb(lrg_buf_cb->skb);
2724                        pci_unmap_single(qdev->pdev,
2725                                         dma_unmap_addr(lrg_buf_cb, mapaddr),
2726                                         dma_unmap_len(lrg_buf_cb, maplen),
2727                                         PCI_DMA_FROMDEVICE);
2728                        memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2729                } else {
2730                        break;
2731                }
2732        }
2733}
2734
2735static void ql_init_large_buffers(struct ql3_adapter *qdev)
2736{
2737        int i;
2738        struct ql_rcv_buf_cb *lrg_buf_cb;
2739        struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2740
2741        for (i = 0; i < qdev->num_large_buffers; i++) {
2742                lrg_buf_cb = &qdev->lrg_buf[i];
2743                buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2744                buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2745                buf_addr_ele++;
2746        }
2747        qdev->lrg_buf_index = 0;
2748        qdev->lrg_buf_skb_check = 0;
2749}
2750
2751static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2752{
2753        int i;
2754        struct ql_rcv_buf_cb *lrg_buf_cb;
2755        struct sk_buff *skb;
2756        dma_addr_t map;
2757        int err;
2758
2759        for (i = 0; i < qdev->num_large_buffers; i++) {
2760                skb = netdev_alloc_skb(qdev->ndev,
2761                                       qdev->lrg_buffer_len);
2762                if (unlikely(!skb)) {
2763                        /* Better luck next round */
2764                        netdev_err(qdev->ndev,
2765                                   "large buff alloc failed for %d bytes at index %d\n",
2766                                   qdev->lrg_buffer_len, i);
2767                        ql_free_large_buffers(qdev);
2768                        return -ENOMEM;
2769                } else {
2770
2771                        lrg_buf_cb = &qdev->lrg_buf[i];
2772                        memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2773                        lrg_buf_cb->index = i;
2774                        lrg_buf_cb->skb = skb;
2775                        /*
2776                         * We save some space to copy the ethhdr from first
2777                         * buffer
2778                         */
2779                        skb_reserve(skb, QL_HEADER_SPACE);
2780                        map = pci_map_single(qdev->pdev,
2781                                             skb->data,
2782                                             qdev->lrg_buffer_len -
2783                                             QL_HEADER_SPACE,
2784                                             PCI_DMA_FROMDEVICE);
2785
2786                        err = pci_dma_mapping_error(qdev->pdev, map);
2787                        if (err) {
2788                                netdev_err(qdev->ndev,
2789                                           "PCI mapping failed with error: %d\n",
2790                                           err);
2791                                ql_free_large_buffers(qdev);
2792                                return -ENOMEM;
2793                        }
2794
2795                        dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2796                        dma_unmap_len_set(lrg_buf_cb, maplen,
2797                                          qdev->lrg_buffer_len -
2798                                          QL_HEADER_SPACE);
2799                        lrg_buf_cb->buf_phy_addr_low =
2800                            cpu_to_le32(LS_64BITS(map));
2801                        lrg_buf_cb->buf_phy_addr_high =
2802                            cpu_to_le32(MS_64BITS(map));
2803                }
2804        }
2805        return 0;
2806}
2807
2808static void ql_free_send_free_list(struct ql3_adapter *qdev)
2809{
2810        struct ql_tx_buf_cb *tx_cb;
2811        int i;
2812
2813        tx_cb = &qdev->tx_buf[0];
2814        for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2815                kfree(tx_cb->oal);
2816                tx_cb->oal = NULL;
2817                tx_cb++;
2818        }
2819}
2820
2821static int ql_create_send_free_list(struct ql3_adapter *qdev)
2822{
2823        struct ql_tx_buf_cb *tx_cb;
2824        int i;
2825        struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2826
2827        /* Create free list of transmit buffers */
2828        for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2829
2830                tx_cb = &qdev->tx_buf[i];
2831                tx_cb->skb = NULL;
2832                tx_cb->queue_entry = req_q_curr;
2833                req_q_curr++;
2834                tx_cb->oal = kmalloc(512, GFP_KERNEL);
2835                if (tx_cb->oal == NULL)
2836                        return -ENOMEM;
2837        }
2838        return 0;
2839}
2840
2841static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2842{
2843        if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2844                qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2845                qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2846        } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2847                /*
2848                 * Bigger buffers, so less of them.
2849                 */
2850                qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2851                qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2852        } else {
2853                netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
2854                           qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2855                return -ENOMEM;
2856        }
2857        qdev->num_large_buffers =
2858                qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2859        qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2860        qdev->max_frame_size =
2861                (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2862
2863        /*
2864         * First allocate a page of shared memory and use it for shadow
2865         * locations of Network Request Queue Consumer Address Register and
2866         * Network Completion Queue Producer Index Register
2867         */
2868        qdev->shadow_reg_virt_addr =
2869                pci_alloc_consistent(qdev->pdev,
2870                                     PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2871
2872        if (qdev->shadow_reg_virt_addr != NULL) {
2873                qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
2874                qdev->req_consumer_index_phy_addr_high =
2875                        MS_64BITS(qdev->shadow_reg_phy_addr);
2876                qdev->req_consumer_index_phy_addr_low =
2877                        LS_64BITS(qdev->shadow_reg_phy_addr);
2878
2879                qdev->prsp_producer_index =
2880                        (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2881                qdev->rsp_producer_index_phy_addr_high =
2882                        qdev->req_consumer_index_phy_addr_high;
2883                qdev->rsp_producer_index_phy_addr_low =
2884                        qdev->req_consumer_index_phy_addr_low + 8;
2885        } else {
2886                netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
2887                return -ENOMEM;
2888        }
2889
2890        if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2891                netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
2892                goto err_req_rsp;
2893        }
2894
2895        if (ql_alloc_buffer_queues(qdev) != 0) {
2896                netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
2897                goto err_buffer_queues;
2898        }
2899
2900        if (ql_alloc_small_buffers(qdev) != 0) {
2901                netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
2902                goto err_small_buffers;
2903        }
2904
2905        if (ql_alloc_large_buffers(qdev) != 0) {
2906                netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
2907                goto err_small_buffers;
2908        }
2909
2910        /* Initialize the large buffer queue. */
2911        ql_init_large_buffers(qdev);
2912        if (ql_create_send_free_list(qdev))
2913                goto err_free_list;
2914
2915        qdev->rsp_current = qdev->rsp_q_virt_addr;
2916
2917        return 0;
2918err_free_list:
2919        ql_free_send_free_list(qdev);
2920err_small_buffers:
2921        ql_free_buffer_queues(qdev);
2922err_buffer_queues:
2923        ql_free_net_req_rsp_queues(qdev);
2924err_req_rsp:
2925        pci_free_consistent(qdev->pdev,
2926                            PAGE_SIZE,
2927                            qdev->shadow_reg_virt_addr,
2928                            qdev->shadow_reg_phy_addr);
2929
2930        return -ENOMEM;
2931}
2932
2933static void ql_free_mem_resources(struct ql3_adapter *qdev)
2934{
2935        ql_free_send_free_list(qdev);
2936        ql_free_large_buffers(qdev);
2937        ql_free_small_buffers(qdev);
2938        ql_free_buffer_queues(qdev);
2939        ql_free_net_req_rsp_queues(qdev);
2940        if (qdev->shadow_reg_virt_addr != NULL) {
2941                pci_free_consistent(qdev->pdev,
2942                                    PAGE_SIZE,
2943                                    qdev->shadow_reg_virt_addr,
2944                                    qdev->shadow_reg_phy_addr);
2945                qdev->shadow_reg_virt_addr = NULL;
2946        }
2947}
2948
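/*
 * Program the NVRAM-derived buflet sizes and the IP/TCP hash, NCB and
 * DRB table parameters into local RAM, holding the DDR RAM semaphore
 * for the duration of the writes.
 */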
2949static int ql_init_misc_registers(struct ql3_adapter *qdev)
2950{
2951        struct ql3xxx_local_ram_registers __iomem *local_ram =
2952            (void __iomem *)qdev->mem_map_registers;
2953
2954        if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2955                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2956                         2) << 4))
2957                return -1;
2958
2959        ql_write_page2_reg(qdev,
2960                           &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2961
2962        ql_write_page2_reg(qdev,
2963                           &local_ram->maxBufletCount,
2964                           qdev->nvram_data.bufletCount);
2965
2966        ql_write_page2_reg(qdev,
2967                           &local_ram->freeBufletThresholdLow,
2968                           (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2969                           (qdev->nvram_data.tcpWindowThreshold0));
2970
2971        ql_write_page2_reg(qdev,
2972                           &local_ram->freeBufletThresholdHigh,
2973                           qdev->nvram_data.tcpWindowThreshold50);
2974
2975        ql_write_page2_reg(qdev,
2976                           &local_ram->ipHashTableBase,
2977                           (qdev->nvram_data.ipHashTableBaseHi << 16) |
2978                           qdev->nvram_data.ipHashTableBaseLo);
2979        ql_write_page2_reg(qdev,
2980                           &local_ram->ipHashTableCount,
2981                           qdev->nvram_data.ipHashTableSize);
2982        ql_write_page2_reg(qdev,
2983                           &local_ram->tcpHashTableBase,
2984                           (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2985                           qdev->nvram_data.tcpHashTableBaseLo);
2986        ql_write_page2_reg(qdev,
2987                           &local_ram->tcpHashTableCount,
2988                           qdev->nvram_data.tcpHashTableSize);
2989        ql_write_page2_reg(qdev,
2990                           &local_ram->ncbBase,
2991                           (qdev->nvram_data.ncbTableBaseHi << 16) |
2992                           qdev->nvram_data.ncbTableBaseLo);
2993        ql_write_page2_reg(qdev,
2994                           &local_ram->maxNcbCount,
2995                           qdev->nvram_data.ncbTableSize);
2996        ql_write_page2_reg(qdev,
2997                           &local_ram->drbBase,
2998                           (qdev->nvram_data.drbTableBaseHi << 16) |
2999                           qdev->nvram_data.drbTableBaseLo);
3000        ql_write_page2_reg(qdev,
3001                           &local_ram->maxDrbCount,
3002                           qdev->nvram_data.drbTableSize);
3003        ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3004        return 0;
3005}
3006
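/*
 * One-time hardware bring-up: take the PHY out of reset, program the
 * request/response and receive buffer queue registers, load the MAC
 * address and clear the IP address registers, then signal
 * configuration-complete, poll portStatus until the chip reports
 * initialization done, and enable the Ethernet function.  Called with
 * hw_lock held; the lock is dropped briefly while polling.
 */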
3007static int ql_adapter_initialize(struct ql3_adapter *qdev)
3008{
3009        u32 value;
3010        struct ql3xxx_port_registers __iomem *port_regs =
3011                qdev->mem_map_registers;
3012        __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3013        struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3014                (void __iomem *)port_regs;
3015        u32 delay = 10;
3016        int status = 0;
3017
3018        if (ql_mii_setup(qdev))
3019                return -1;
3020
3021        /* Bring the PHY out of reset */
3022        ql_write_common_reg(qdev, spir,
3023                            (ISP_SERIAL_PORT_IF_WE |
3024                             (ISP_SERIAL_PORT_IF_WE << 16)));
3025        /* Give the PHY time to come out of reset. */
3026        mdelay(100);
3027        qdev->port_link_state = LS_DOWN;
3028        netif_carrier_off(qdev->ndev);
3029
3030        /* V2 chip fix for ARS-39168. */
3031        ql_write_common_reg(qdev, spir,
3032                            (ISP_SERIAL_PORT_IF_SDE |
3033                             (ISP_SERIAL_PORT_IF_SDE << 16)));
3034
3035        /* Request Queue Registers */
3036        *((u32 *)(qdev->preq_consumer_index)) = 0;
3037        atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3038        qdev->req_producer_index = 0;
3039
3040        ql_write_page1_reg(qdev,
3041                           &hmem_regs->reqConsumerIndexAddrHigh,
3042                           qdev->req_consumer_index_phy_addr_high);
3043        ql_write_page1_reg(qdev,
3044                           &hmem_regs->reqConsumerIndexAddrLow,
3045                           qdev->req_consumer_index_phy_addr_low);
3046
3047        ql_write_page1_reg(qdev,
3048                           &hmem_regs->reqBaseAddrHigh,
3049                           MS_64BITS(qdev->req_q_phy_addr));
3050        ql_write_page1_reg(qdev,
3051                           &hmem_regs->reqBaseAddrLow,
3052                           LS_64BITS(qdev->req_q_phy_addr));
3053        ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3054
3055        /* Response Queue Registers */
3056        *((__le16 *) (qdev->prsp_producer_index)) = 0;
3057        qdev->rsp_consumer_index = 0;
3058        qdev->rsp_current = qdev->rsp_q_virt_addr;
3059
3060        ql_write_page1_reg(qdev,
3061                           &hmem_regs->rspProducerIndexAddrHigh,
3062                           qdev->rsp_producer_index_phy_addr_high);
3063
3064        ql_write_page1_reg(qdev,
3065                           &hmem_regs->rspProducerIndexAddrLow,
3066                           qdev->rsp_producer_index_phy_addr_low);
3067
3068        ql_write_page1_reg(qdev,
3069                           &hmem_regs->rspBaseAddrHigh,
3070                           MS_64BITS(qdev->rsp_q_phy_addr));
3071
3072        ql_write_page1_reg(qdev,
3073                           &hmem_regs->rspBaseAddrLow,
3074                           LS_64BITS(qdev->rsp_q_phy_addr));
3075
3076        ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3077
3078        /* Large Buffer Queue */
3079        ql_write_page1_reg(qdev,
3080                           &hmem_regs->rxLargeQBaseAddrHigh,
3081                           MS_64BITS(qdev->lrg_buf_q_phy_addr));
3082
3083        ql_write_page1_reg(qdev,
3084                           &hmem_regs->rxLargeQBaseAddrLow,
3085                           LS_64BITS(qdev->lrg_buf_q_phy_addr));
3086
3087        ql_write_page1_reg(qdev,
3088                           &hmem_regs->rxLargeQLength,
3089                           qdev->num_lbufq_entries);
3090
3091        ql_write_page1_reg(qdev,
3092                           &hmem_regs->rxLargeBufferLength,
3093                           qdev->lrg_buffer_len);
3094
3095        /* Small Buffer Queue */
3096        ql_write_page1_reg(qdev,
3097                           &hmem_regs->rxSmallQBaseAddrHigh,
3098                           MS_64BITS(qdev->small_buf_q_phy_addr));
3099
3100        ql_write_page1_reg(qdev,
3101                           &hmem_regs->rxSmallQBaseAddrLow,
3102                           LS_64BITS(qdev->small_buf_q_phy_addr));
3103
3104        ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3105        ql_write_page1_reg(qdev,
3106                           &hmem_regs->rxSmallBufferLength,
3107                           QL_SMALL_BUFFER_SIZE);
3108
3109        qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3110        qdev->small_buf_release_cnt = 8;
3111        qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3112        qdev->lrg_buf_release_cnt = 8;
3113        qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
3114        qdev->small_buf_index = 0;
3115        qdev->lrg_buf_index = 0;
3116        qdev->lrg_buf_free_count = 0;
3117        qdev->lrg_buf_free_head = NULL;
3118        qdev->lrg_buf_free_tail = NULL;
3119
3120        ql_write_common_reg(qdev,
3121                            &port_regs->CommonRegs.
3122                            rxSmallQProducerIndex,
3123                            qdev->small_buf_q_producer_index);
3124        ql_write_common_reg(qdev,
3125                            &port_regs->CommonRegs.
3126                            rxLargeQProducerIndex,
3127                            qdev->lrg_buf_q_producer_index);
3128
3129        /*
3130         * Find out if the chip has already been initialized.  If it has, then
3131         * we skip some of the initialization.
3132         */
3133        clear_bit(QL_LINK_MASTER, &qdev->flags);
3134        value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3135        if ((value & PORT_STATUS_IC) == 0) {
3136
3137                /* Chip has not been configured yet, so let it rip. */
3138                if (ql_init_misc_registers(qdev)) {
3139                        status = -1;
3140                        goto out;
3141                }
3142
3143                value = qdev->nvram_data.tcpMaxWindowSize;
3144                ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3145
3146                value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3147
3148                if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3149                                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3150                                 * 2) << 13)) {
3151                        status = -1;
3152                        goto out;
3153                }
3154                ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3155                ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3156                                   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3157                                     16) | (INTERNAL_CHIP_SD |
3158                                            INTERNAL_CHIP_WE)));
3159                ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3160        }
3161
3162        if (qdev->mac_index)
3163                ql_write_page0_reg(qdev,
3164                                   &port_regs->mac1MaxFrameLengthReg,
3165                                   qdev->max_frame_size);
3166        else
3167                ql_write_page0_reg(qdev,
3168                                   &port_regs->mac0MaxFrameLengthReg,
3169                                   qdev->max_frame_size);
3170
3171        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3172                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3173                         2) << 7)) {
3174                status = -1;
3175                goto out;
3176        }
3177
3178        PHY_Setup(qdev);
3179        ql_init_scan_mode(qdev);
3180        ql_get_phy_owner(qdev);
3181
3182        /* Load the MAC Configuration */
3183
3184        /* Program lower 32 bits of the MAC address */
3185        ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3186                           (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3187        ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3188                           ((qdev->ndev->dev_addr[2] << 24)
3189                            | (qdev->ndev->dev_addr[3] << 16)
3190                            | (qdev->ndev->dev_addr[4] << 8)
3191                            | qdev->ndev->dev_addr[5]));
3192
3193        /* Program top 16 bits of the MAC address */
3194        ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3195                           ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3196        ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3197                           ((qdev->ndev->dev_addr[0] << 8)
3198                            | qdev->ndev->dev_addr[1]));
3199
3200        /* Enable Primary MAC */
3201        ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3202                           ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3203                            MAC_ADDR_INDIRECT_PTR_REG_PE));
3204
3205        /* Clear Primary and Secondary IP addresses */
3206        ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3207                           ((IP_ADDR_INDEX_REG_MASK << 16) |
3208                            (qdev->mac_index << 2)));
3209        ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3210
3211        ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3212                           ((IP_ADDR_INDEX_REG_MASK << 16) |
3213                            ((qdev->mac_index << 2) + 1)));
3214        ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3215
3216        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3217
3218        /* Indicate Configuration Complete */
3219        ql_write_page0_reg(qdev,
3220                           &port_regs->portControl,
3221                           ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3222
3223        do {
3224                value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3225                if (value & PORT_STATUS_IC)
3226                        break;
3227                spin_unlock_irq(&qdev->hw_lock);
3228                msleep(500);
3229                spin_lock_irq(&qdev->hw_lock);
3230        } while (--delay);
3231
3232        if (delay == 0) {
3233                netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3234                status = -1;
3235                goto out;
3236        }
3237
3238        /* Enable Ethernet Function */
3239        if (qdev->device_id == QL3032_DEVICE_ID) {
3240                value =
3241                    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3242                     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3243                        QL3032_PORT_CONTROL_ET);
3244                ql_write_page0_reg(qdev, &port_regs->functionControl,
3245                                   ((value << 16) | value));
3246        } else {
3247                value =
3248                    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3249                     PORT_CONTROL_HH);
3250                ql_write_page0_reg(qdev, &port_regs->portControl,
3251                                   ((value << 16) | value));
3252        }
3253
3254
3255out:
3256        return status;
3257}
3258
3259/*
3260 * Caller holds hw_lock.
3261 */
3262static int ql_adapter_reset(struct ql3_adapter *qdev)
3263{
3264        struct ql3xxx_port_registers __iomem *port_regs =
3265                qdev->mem_map_registers;
3266        int status = 0;
3267        u16 value;
3268        int max_wait_time;
3269
3270        set_bit(QL_RESET_ACTIVE, &qdev->flags);
3271        clear_bit(QL_RESET_DONE, &qdev->flags);
3272
3273        /*
3274         * Issue soft reset to chip.
3275         */
3276        netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3277        ql_write_common_reg(qdev,
3278                            &port_regs->CommonRegs.ispControlStatus,
3279                            ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3280
3281        /* Wait up to 5 seconds for the soft reset to complete. */
3282        netdev_printk(KERN_DEBUG, qdev->ndev,
3283                      "Waiting up to 5 seconds for reset to complete\n");
3284
3285        /* Wait until the firmware tells us the Soft Reset is done */
3286        max_wait_time = 5;
3287        do {
3288                value =
3289                    ql_read_common_reg(qdev,
3290                                       &port_regs->CommonRegs.ispControlStatus);
3291                if ((value & ISP_CONTROL_SR) == 0)
3292                        break;
3293
3294                ssleep(1);
3295        } while ((--max_wait_time));
3296
3297        /*
3298         * Also, make sure that the Network Reset Interrupt bit has been
3299         * cleared after the soft reset has taken place.
3300         */
3301        value =
3302            ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3303        if (value & ISP_CONTROL_RI) {
3304                netdev_printk(KERN_DEBUG, qdev->ndev,
3305                              "clearing RI after reset\n");
3306                ql_write_common_reg(qdev,
3307                                    &port_regs->CommonRegs.
3308                                    ispControlStatus,
3309                                    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3310        }
3311
3312        if (max_wait_time == 0) {
3313                /* Issue Force Soft Reset */
3314                ql_write_common_reg(qdev,
3315                                    &port_regs->CommonRegs.
3316                                    ispControlStatus,
3317                                    ((ISP_CONTROL_FSR << 16) |
3318                                     ISP_CONTROL_FSR));
3319                /*
3320                 * Wait until the firmware tells us the Force Soft Reset is
3321                 * done
3322                 */
3323                max_wait_time = 5;
3324                do {
3325                        value = ql_read_common_reg(qdev,
3326                                                   &port_regs->CommonRegs.
3327                                                   ispControlStatus);
3328                        if ((value & ISP_CONTROL_FSR) == 0)
3329                                break;
3330                        ssleep(1);
3331                } while ((--max_wait_time));
3332        }
3333        if (max_wait_time == 0)
3334                status = 1;
3335
3336        clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3337        set_bit(QL_RESET_DONE, &qdev->flags);
3338        return status;
3339}
3340
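/*
 * Work out which network function this PCI function is from
 * ispControlStatus, then set the MAC index, outbound IOCB opcode,
 * mailbox bit mask, PHY address and the optical/copper link flag.
 */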
3341static void ql_set_mac_info(struct ql3_adapter *qdev)
3342{
3343        struct ql3xxx_port_registers __iomem *port_regs =
3344                qdev->mem_map_registers;
3345        u32 value, port_status;
3346        u8 func_number;
3347
3348        /* Get the function number */
3349        value =
3350            ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3351        func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3352        port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3353        switch (value & ISP_CONTROL_FN_MASK) {
3354        case ISP_CONTROL_FN0_NET:
3355                qdev->mac_index = 0;
3356                qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3357                qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3358                qdev->PHYAddr = PORT0_PHY_ADDRESS;
3359                if (port_status & PORT_STATUS_SM0)
3360                        set_bit(QL_LINK_OPTICAL, &qdev->flags);
3361                else
3362                        clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3363                break;
3364
3365        case ISP_CONTROL_FN1_NET:
3366                qdev->mac_index = 1;
3367                qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3368                qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3369                qdev->PHYAddr = PORT1_PHY_ADDRESS;
3370                if (port_status & PORT_STATUS_SM1)
3371                        set_bit(QL_LINK_OPTICAL, &qdev->flags);
3372                else
3373                        clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3374                break;
3375
3376        case ISP_CONTROL_FN0_SCSI:
3377        case ISP_CONTROL_FN1_SCSI:
3378        default:
3379                netdev_printk(KERN_DEBUG, qdev->ndev,
3380                              "Invalid function number, ispControlStatus = 0x%x\n",
3381                              value);
3382                break;
3383        }
3384        qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3385}
3386
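/* Log adapter type, interface type, PCI bus mode, IRQ and MAC address at probe time. */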
3387static void ql_display_dev_info(struct net_device *ndev)
3388{
3389        struct ql3_adapter *qdev = netdev_priv(ndev);
3390        struct pci_dev *pdev = qdev->pdev;
3391
3392        netdev_info(ndev,
3393                    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
3394                    DRV_NAME, qdev->index, qdev->chip_rev_id,
3395                    qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3396                    qdev->pci_slot);
3397        netdev_info(ndev, "%s Interface\n",
3398                test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3399
3400        /*
3401         * Print PCI bus width/type.
3402         */
3403        netdev_info(ndev, "Bus interface is %s %s\n",
3404                    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3405                    ((qdev->pci_x) ? "PCI-X" : "PCI"));
3406
3407        netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
3408                    qdev->mem_map_registers);
3409        netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
3410
3411        netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3412}
3413
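/*
 * Take the interface down: stop the queue and carrier, disable
 * interrupts, release the IRQ (and MSI), stop the adapter timer and
 * NAPI, optionally soft-reset the chip under hw_lock, and free all
 * memory resources.
 */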
3414static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3415{
3416        struct net_device *ndev = qdev->ndev;
3417        int retval = 0;
3418
3419        netif_stop_queue(ndev);
3420        netif_carrier_off(ndev);
3421
3422        clear_bit(QL_ADAPTER_UP, &qdev->flags);
3423        clear_bit(QL_LINK_MASTER, &qdev->flags);
3424
3425        ql_disable_interrupts(qdev);
3426
3427        free_irq(qdev->pdev->irq, ndev);
3428
3429        if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3430                netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3431                clear_bit(QL_MSI_ENABLED, &qdev->flags);
3432                pci_disable_msi(qdev->pdev);
3433        }
3434
3435        del_timer_sync(&qdev->adapter_timer);
3436
3437        napi_disable(&qdev->napi);
3438
3439        if (do_reset) {
3440                int soft_reset;
3441                unsigned long hw_flags;
3442
3443                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3444                if (ql_wait_for_drvr_lock(qdev)) {
3445                        soft_reset = ql_adapter_reset(qdev);
3446                        if (soft_reset) {
3447                                netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
3448                                           qdev->index);
3449                        }
3450                        netdev_err(ndev,
3451                                   "Releasing driver lock via chip reset\n");
3452                } else {
3453                        netdev_err(ndev,
3454                                   "Could not acquire driver lock to do reset!\n");
3455                        retval = -1;
3456                }
3457                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3458        }
3459        ql_free_mem_resources(qdev);
3460        return retval;
3461}
3462
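/*
 * Bring the interface up: allocate memory resources, enable MSI if
 * requested, hook the interrupt, initialize the adapter while holding
 * the driver semaphore, then start the adapter timer, NAPI and
 * interrupts.
 */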
3463static int ql_adapter_up(struct ql3_adapter *qdev)
3464{
3465        struct net_device *ndev = qdev->ndev;
3466        int err;
3467        unsigned long irq_flags = IRQF_SHARED;
3468        unsigned long hw_flags;
3469
3470        if (ql_alloc_mem_resources(qdev)) {
3471                netdev_err(ndev, "Unable to allocate buffers\n");
3472                return -ENOMEM;
3473        }
3474
3475        if (qdev->msi) {
3476                if (pci_enable_msi(qdev->pdev)) {
3477                        netdev_err(ndev,
3478                                   "User requested MSI, but MSI failed to initialize.  Continuing without MSI.\n");
3479                        qdev->msi = 0;
3480                } else {
3481                        netdev_info(ndev, "MSI Enabled...\n");
3482                        set_bit(QL_MSI_ENABLED, &qdev->flags);
3483                        irq_flags &= ~IRQF_SHARED;
3484                }
3485        }
3486
3487        err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3488                          irq_flags, ndev->name, ndev);
3489        if (err) {
3490                netdev_err(ndev,
3491                           "Failed to reserve interrupt %d - already in use\n",
3492                           qdev->pdev->irq);
3493                goto err_irq;
3494        }
3495
3496        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3497
3498        err = ql_wait_for_drvr_lock(qdev);
3499        if (err) {
3500                err = ql_adapter_initialize(qdev);
3501                if (err) {
3502                        netdev_err(ndev, "Unable to initialize adapter\n");
3503                        goto err_init;
3504                }
3505                netdev_err(ndev, "Releasing driver lock\n");
3506                ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3507        } else {
3508                netdev_err(ndev, "Could not acquire driver lock\n");
3509                goto err_lock;
3510        }
3511
3512        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3513
3514        set_bit(QL_ADAPTER_UP, &qdev->flags);
3515
3516        mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3517
3518        napi_enable(&qdev->napi);
3519        ql_enable_interrupts(qdev);
3520        return 0;
3521
3522err_init:
3523        ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3524err_lock:
3525        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3526        free_irq(qdev->pdev->irq, ndev);
3527err_irq:
3528        if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3529                netdev_info(ndev, "calling pci_disable_msi()\n");
3530                clear_bit(QL_MSI_ENABLED, &qdev->flags);
3531                pci_disable_msi(qdev->pdev);
3532        }
3533        return err;
3534}
3535
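/* Bounce the adapter (down, then up); close the device if either step fails. */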
3536static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3537{
3538        if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3539                netdev_err(qdev->ndev,
3540                           "Driver up/down cycle failed, closing device\n");
3541                rtnl_lock();
3542                dev_close(qdev->ndev);
3543                rtnl_unlock();
3544                return -1;
3545        }
3546        return 0;
3547}
3548
3549static int ql3xxx_close(struct net_device *ndev)
3550{
3551        struct ql3_adapter *qdev = netdev_priv(ndev);
3552
3553        /*
3554         * Wait for device to recover from a reset.
3555         * (Rarely happens, but possible.)
3556         */
3557        while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3558                msleep(50);
3559
3560        ql_adapter_down(qdev, QL_DO_RESET);
3561        return 0;
3562}
3563
3564static int ql3xxx_open(struct net_device *ndev)
3565{
3566        struct ql3_adapter *qdev = netdev_priv(ndev);
3567        return ql_adapter_up(qdev);
3568}
3569
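/*
 * Set a new station address (e.g. via "ip link set ... address ..."),
 * allowed only while the interface is down.  The address is written
 * through the indirect MAC address pointer/data register pair.
 */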
3570static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3571{
3572        struct ql3_adapter *qdev = netdev_priv(ndev);
3573        struct ql3xxx_port_registers __iomem *port_regs =
3574                        qdev->mem_map_registers;
3575        struct sockaddr *addr = p;
3576        unsigned long hw_flags;
3577
3578        if (netif_running(ndev))
3579                return -EBUSY;
3580
3581        if (!is_valid_ether_addr(addr->sa_data))
3582                return -EADDRNOTAVAIL;
3583
3584        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3585
3586        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3587        /* Program lower 32 bits of the MAC address */
3588        ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3589                           (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3590        ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3591                           ((ndev->dev_addr[2] << 24) | (ndev->
3592                                                         dev_addr[3] << 16) |
3593                            (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3594
3595        /* Program top 16 bits of the MAC address */
3596        ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3597                           ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3598        ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3599                           ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3600        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3601
3602        return 0;
3603}
3604
3605static void ql3xxx_tx_timeout(struct net_device *ndev)
3606{
3607        struct ql3_adapter *qdev = netdev_priv(ndev);
3608
3609        netdev_err(ndev, "Resetting...\n");
3610        /*
3611         * Stop the queues, we've got a problem.
3612         */
3613        netif_stop_queue(ndev);
3614
3615        /*
3616         * Wake up the worker to process this event.
3617         */
3618        queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3619}
3620
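/*
 * Deferred reset handler: release any transmit skbs still mapped for
 * DMA, clear the Network Reset Interrupt, wait for the soft reset to
 * finish (forcing a full down/up cycle with reset if it times out),
 * and then bring the adapter back up.
 */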
3621static void ql_reset_work(struct work_struct *work)
3622{
3623        struct ql3_adapter *qdev =
3624                container_of(work, struct ql3_adapter, reset_work.work);
3625        struct net_device *ndev = qdev->ndev;
3626        u32 value;
3627        struct ql_tx_buf_cb *tx_cb;
3628        int max_wait_time, i;
3629        struct ql3xxx_port_registers __iomem *port_regs =
3630                qdev->mem_map_registers;
3631        unsigned long hw_flags;
3632
3633        if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
            test_bit(QL_RESET_START, &qdev->flags)) {
3634                clear_bit(QL_LINK_MASTER, &qdev->flags);
3635
3636                /*
3637                 * Loop through the active list and return the skb.
3638                 */
3639                for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3640                        int j;
3641                        tx_cb = &qdev->tx_buf[i];
3642                        if (tx_cb->skb) {
3643                                netdev_printk(KERN_DEBUG, ndev,
3644                                              "Freeing lost SKB\n");
3645                                pci_unmap_single(qdev->pdev,
3646                                         dma_unmap_addr(&tx_cb->map[0],
3647                                                        mapaddr),
3648                                         dma_unmap_len(&tx_cb->map[0], maplen),
3649                                         PCI_DMA_TODEVICE);
3650                                for (j = 1; j < tx_cb->seg_count; j++) {
3651                                        pci_unmap_page(qdev->pdev,
3652                                               dma_unmap_addr(&tx_cb->map[j],
3653                                                              mapaddr),
3654                                               dma_unmap_len(&tx_cb->map[j],
3655                                                             maplen),
3656                                               PCI_DMA_TODEVICE);
3657                                }
3658                                dev_kfree_skb(tx_cb->skb);
3659                                tx_cb->skb = NULL;
3660                        }
3661                }
3662
3663                netdev_err(ndev, "Clearing NRI after reset\n");
3664                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3665                ql_write_common_reg(qdev,
3666                                    &port_regs->CommonRegs.
3667                                    ispControlStatus,
3668                                    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3669                /*
3670                 * Wait for the Soft Reset to complete.
3671                 */
3672                max_wait_time = 10;
3673                do {
3674                        value = ql_read_common_reg(qdev,
3675                                                   &port_regs->
3676                                                   CommonRegs.
3677                                                   ispControlStatus);
3678                        if ((value & ISP_CONTROL_SR) == 0) {
3679                                netdev_printk(KERN_DEBUG, ndev,
3680                                              "reset completed\n");
3681                                break;
3682                        }
3683
3684                        if (value & ISP_CONTROL_RI) {
3685                                netdev_printk(KERN_DEBUG, ndev,
3686                                              "clearing NRI after reset\n");
3687                                ql_write_common_reg(qdev,
3688                                                    &port_regs->
3689                                                    CommonRegs.
3690                                                    ispControlStatus,
3691                                                    ((ISP_CONTROL_RI <<
3692                                                      16) | ISP_CONTROL_RI));
3693                        }
3694
3695                        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3696                        ssleep(1);
3697                        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3698                } while (--max_wait_time);
3699                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3700
3701                if (value & ISP_CONTROL_SR) {
3702
3703                        /*
3704                         * Set the reset flags and clear the board again.
3705                         * Nothing else to do...
3706                         */
3707                        netdev_err(ndev,
3708                                   "Timed out waiting for reset to complete\n");
3709                        netdev_err(ndev, "Do a reset\n");
3710                        clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3711                        clear_bit(QL_RESET_START, &qdev->flags);
3712                        ql_cycle_adapter(qdev, QL_DO_RESET);
3713                        return;
3714                }
3715
3716                clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3717                clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3718                clear_bit(QL_RESET_START, &qdev->flags);
3719                ql_cycle_adapter(qdev, QL_NO_RESET);
3720        }
3721}
3722
3723static void ql_tx_timeout_work(struct work_struct *work)
3724{
3725        struct ql3_adapter *qdev =
3726                container_of(work, struct ql3_adapter, tx_timeout_work.work);
3727
3728        ql_cycle_adapter(qdev, QL_DO_RESET);
3729}
3730
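/* Decode chip revision, PCI bus width and PCI-X mode from portStatus. */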
3731static void ql_get_board_info(struct ql3_adapter *qdev)
3732{
3733        struct ql3xxx_port_registers __iomem *port_regs =
3734                qdev->mem_map_registers;
3735        u32 value;
3736
3737        value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3738
3739        qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3740        if (value & PORT_STATUS_64)
3741                qdev->pci_width = 64;
3742        else
3743                qdev->pci_width = 32;
3744        if (value & PORT_STATUS_X)
3745                qdev->pci_x = 1;
3746        else
3747                qdev->pci_x = 0;
3748        qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3749}
3750
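/* Adapter timer callback: defer link-state processing to the workqueue. */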
3751static void ql3xxx_timer(unsigned long ptr)
3752{
3753        struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3754        queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3755}
3756
3757static const struct net_device_ops ql3xxx_netdev_ops = {
3758        .ndo_open               = ql3xxx_open,
3759        .ndo_start_xmit         = ql3xxx_send,
3760        .ndo_stop               = ql3xxx_close,
3761        .ndo_change_mtu_rh74    = eth_change_mtu,
3762        .ndo_validate_addr      = eth_validate_addr,
3763        .ndo_set_mac_address    = ql3xxx_set_mac_address,
3764        .ndo_tx_timeout         = ql3xxx_tx_timeout,
3765};
3766
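/*
 * PCI probe: enable the device, set the DMA mask, map the register
 * BAR, read the NVRAM configuration, pick the port MAC address and
 * MTU, register the net device, and set up the work items and the
 * adapter timer.  Hardware initialization proper is deferred until
 * the device is opened.
 */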
3767static int ql3xxx_probe(struct pci_dev *pdev,
3768                        const struct pci_device_id *pci_entry)
3769{
3770        struct net_device *ndev = NULL;
3771        struct ql3_adapter *qdev = NULL;
3772        static int cards_found;
3773        int uninitialized_var(pci_using_dac), err;
3774
3775        err = pci_enable_device(pdev);
3776        if (err) {
3777                pr_err("%s cannot enable PCI device\n", pci_name(pdev));
3778                goto err_out;
3779        }
3780
3781        err = pci_request_regions(pdev, DRV_NAME);
3782        if (err) {
3783                pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
3784                goto err_out_disable_pdev;
3785        }
3786
3787        pci_set_master(pdev);
3788
3789        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3790                pci_using_dac = 1;
3791                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3792        } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
3793                pci_using_dac = 0;
3794                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3795        }
3796
3797        if (err) {
3798                pr_err("%s no usable DMA configuration\n", pci_name(pdev));
3799                goto err_out_free_regions;
3800        }
3801
3802        ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3803        if (!ndev) {
3804                err = -ENOMEM;
3805                goto err_out_free_regions;
3806        }
3807
3808        SET_NETDEV_DEV(ndev, &pdev->dev);
3809
3810        pci_set_drvdata(pdev, ndev);
3811
3812        qdev = netdev_priv(ndev);
3813        qdev->index = cards_found;
3814        qdev->ndev = ndev;
3815        qdev->pdev = pdev;
3816        qdev->device_id = pci_entry->device;
3817        qdev->port_link_state = LS_DOWN;
3818        if (msi)
3819                qdev->msi = 1;
3820
3821        qdev->msg_enable = netif_msg_init(debug, default_msg);
3822
3823        if (pci_using_dac)
3824                ndev->features |= NETIF_F_HIGHDMA;
3825        if (qdev->device_id == QL3032_DEVICE_ID)
3826                ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3827
3828        qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3829        if (!qdev->mem_map_registers) {
3830                pr_err("%s: cannot map device registers\n", pci_name(pdev));
3831                err = -EIO;
3832                goto err_out_free_ndev;
3833        }
3834
3835        spin_lock_init(&qdev->adapter_lock);
3836        spin_lock_init(&qdev->hw_lock);
3837
3838        /* Set driver entry points */
3839        ndev->netdev_ops = &ql3xxx_netdev_ops;
3840        SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3841        ndev->watchdog_timeo = 5 * HZ;
3842
3843        netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
3844
3845        ndev->irq = pdev->irq;
3846
3847        /* make sure the EEPROM is good */
3848        if (ql_get_nvram_params(qdev)) {
3849                pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
3850                         __func__, qdev->index);
3851                err = -EIO;
3852                goto err_out_iounmap;
3853        }
3854
3855        ql_set_mac_info(qdev);
3856
3857        /* Validate and set parameters */
3858        if (qdev->mac_index) {
3859                ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
3860                ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
3861        } else {
3862                ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
3863                ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
3864        }
3865
3866        ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
3867
3868        /* Record PCI bus information. */
3869        ql_get_board_info(qdev);
3870
3871        /*
3872         * Set the Maximum Memory Read Byte Count value. We do this to handle
3873         * jumbo frames.
3874         */
3875        if (qdev->pci_x)
3876                pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
3877
3878        err = register_netdev(ndev);
3879        if (err) {
3880                pr_err("%s: cannot register net device\n", pci_name(pdev));
3881                goto err_out_iounmap;
3882        }
3883
3884        /* we're going to reset, so assume we have no link for now */
3885
3886        netif_carrier_off(ndev);
3887        netif_stop_queue(ndev);
3888
3889        qdev->workqueue = create_singlethread_workqueue(ndev->name);
3890        INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3891        INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3892        INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
3893
3894        init_timer(&qdev->adapter_timer);
3895        qdev->adapter_timer.function = ql3xxx_timer;
3896        qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3897        qdev->adapter_timer.data = (unsigned long)qdev;
3898
3899        if (!cards_found) {
3900                pr_alert("%s\n", DRV_STRING);
3901                pr_alert("Driver name: %s, Version: %s\n",
3902                         DRV_NAME, DRV_VERSION);
3903        }
3904        ql_display_dev_info(ndev);
3905
3906        cards_found++;
3907        return 0;
3908
3909err_out_iounmap:
3910        iounmap(qdev->mem_map_registers);
3911err_out_free_ndev:
3912        free_netdev(ndev);
3913err_out_free_regions:
3914        pci_release_regions(pdev);
3915err_out_disable_pdev:
3916        pci_disable_device(pdev);
3917        pci_set_drvdata(pdev, NULL);
3918err_out:
3919        return err;
3920}
3921
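/*
 * PCI remove: unregister the net device, disable interrupts, flush and
 * destroy the workqueue, then unmap the registers and release the PCI
 * resources.
 */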
3922static void ql3xxx_remove(struct pci_dev *pdev)
3923{
3924        struct net_device *ndev = pci_get_drvdata(pdev);
3925        struct ql3_adapter *qdev = netdev_priv(ndev);
3926
3927        unregister_netdev(ndev);
3928
3929        ql_disable_interrupts(qdev);
3930
3931        if (qdev->workqueue) {
3932                cancel_delayed_work(&qdev->reset_work);
3933                cancel_delayed_work(&qdev->tx_timeout_work);
3934                destroy_workqueue(qdev->workqueue);
3935                qdev->workqueue = NULL;
3936        }
3937
3938        iounmap(qdev->mem_map_registers);
3939        pci_release_regions(pdev);
3940        pci_set_drvdata(pdev, NULL);
3941        free_netdev(ndev);
3942}
3943
3944static struct pci_driver ql3xxx_driver = {
3945
3946        .name = DRV_NAME,
3947        .id_table = ql3xxx_pci_tbl,
3948        .probe = ql3xxx_probe,
3949        .remove = ql3xxx_remove,
3950};
3951
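/* module_pci_driver() supplies the module init/exit boilerplate for ql3xxx_driver. */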
3952module_pci_driver(ql3xxx_driver);
3953