linux/drivers/net/qla3xxx.c
/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>

#include "qla3xxx.h"

#define DRV_NAME        "qla3xxx"
#define DRV_STRING      "QLogic ISP3XXX Network Driver"
#define DRV_VERSION     "v2.03.00-k5"
#define PFX             DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;          /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs used with this adapter.
 */
typedef enum {
   PHY_TYPE_UNKNOWN   = 0,
   PHY_VITESSE_VSC8211,
   PHY_AGERE_ET1011C,
   MAX_PHY_DEV_TYPES
} PHY_DEVICE_et;

typedef struct {
        PHY_DEVICE_et phyDevice;
        u32             phyIdOUI;
        u16             phyIdModel;
        char            *name;
} PHY_DEVICE_INFO_t;

static const PHY_DEVICE_INFO_t PHY_DEVICES[] = {
        {PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
        {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
        {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};

/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
                           u32 sem_mask, u32 sem_bits)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
        u32 value;
        unsigned int seconds = 3;

        do {
                writel((sem_mask | sem_bits),
                       &port_regs->CommonRegs.semaphoreReg);
                value = readl(&port_regs->CommonRegs.semaphoreReg);
                if ((value & (sem_mask >> 16)) == sem_bits)
                        return 0;
                ssleep(1);
        } while (--seconds);
        return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
        writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
        readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
        u32 value;

        writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
        value = readl(&port_regs->CommonRegs.semaphoreReg);
        return ((value & (sem_mask >> 16)) == sem_bits);
}
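
/*
 * A minimal usage sketch of the hardware semaphore idiom above (not part
 * of the original driver).  ql_sem_spinlock() retries for up to three
 * seconds while ql_sem_lock() makes a single attempt; both are released
 * with ql_sem_unlock(), and hw_lock must be held per the comments above.
 * The helper name is hypothetical; the mask and bit values mirror the
 * NVRAM usage seen later in ql_get_nvram_params().
 */
static int ql_sem_usage_sketch(struct ql3_adapter *qdev)
{
        /* Try to take the NVRAM semaphore; give up after ~3 seconds. */
        if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
                            (QL_RESOURCE_BITS_BASE_CODE |
                             (qdev->mac_index) * 2) << 10))
                return -1;

        /* ... touch the NVRAM-protected resource here ... */

        ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
        return 0;
}
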
/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
        int i = 0;

        while (1) {
                if (!ql_sem_lock(qdev,
                                 QL_DRVR_SEM_MASK,
                                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
                                  * 2) << 1)) {
                        if (i < 10) {
                                ssleep(1);
                                i++;
                        } else {
                                printk(KERN_ERR PFX "%s: Timed out waiting for "
                                       "driver lock...\n",
                                       qdev->ndev->name);
                                return 0;
                        }
                } else {
                        printk(KERN_DEBUG PFX
                               "%s: driver lock acquired.\n",
                               qdev->ndev->name);
                        return 1;
                }
        }
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

        writel(((ISP_CONTROL_NP_MASK << 16) | page),
               &port_regs->CommonRegs.ispControlStatus);
        readl(&port_regs->CommonRegs.ispControlStatus);
        qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
                                u32 __iomem *reg)
{
        u32 value;
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        value = readl(reg);
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

        return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
                              u32 __iomem *reg)
{
        return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
        u32 value;
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);

        if (qdev->current_page != 0)
                ql_set_register_page(qdev, 0);
        value = readl(reg);

        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
        if (qdev->current_page != 0)
                ql_set_register_page(qdev, 0);
        return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
                                  u32 __iomem *reg, u32 value)
{
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        writel(value, reg);
        readl(reg);
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
                                u32 __iomem *reg, u32 value)
{
        writel(value, reg);
        readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        writel(value, reg);
        readl(reg);
        udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        if (qdev->current_page != 0)
                ql_set_register_page(qdev, 0);
        writel(value, reg);
        readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        if (qdev->current_page != 1)
                ql_set_register_page(qdev, 1);
        writel(value, reg);
        readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        if (qdev->current_page != 2)
                ql_set_register_page(qdev, 2);
        writel(value, reg);
        readl(reg);
}
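
/*
 * Usage sketch for the paged register accessors above (illustration
 * only, not original driver code).  The chip exposes more registers
 * than fit in its BAR window, so ISP_CONTROL_NP selects the visible
 * register "page"; the accessors switch pages lazily by tracking
 * qdev->current_page.  Because the page selection is global adapter
 * state, callers either hold hw_lock themselves or use the _l
 * variants, which take it internally.
 */
static u32 ql_paged_read_sketch(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        /* Locked variant: takes hw_lock, forces page 0, then reads. */
        return ql_read_page0_reg_l(qdev, &port_regs->portStatus);
}
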
static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

        ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
                              (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

        ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
                              ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
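
/*
 * Note on the interrupt mask writes above: ispInterruptMaskReg, like
 * the MAC config registers later in this file, treats its upper 16
 * bits as a per-bit write-enable mask for the lower 16 bits.  Writing
 * (bit << 16) clears the bit, (bit << 16) | bit sets it, and bits
 * whose mask half is zero are left untouched, so no read-modify-write
 * cycle is needed.  A hypothetical generic helper for the idiom (not
 * original driver code):
 */
static void ql_masked_bit_write_sketch(struct ql3_adapter *qdev,
                                       u32 __iomem *reg, u32 bit, int set)
{
        /* Enable only 'bit' for update; other bits are unaffected. */
        ql_write_common_reg_l(qdev, reg, (bit << 16) | (set ? bit : 0));
}
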
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                                            struct ql_rcv_buf_cb *lrg_buf_cb)
{
        dma_addr_t map;
        int err;

        lrg_buf_cb->next = NULL;

        if (qdev->lrg_buf_free_tail == NULL) {  /* The list is empty */
                qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
        } else {
                qdev->lrg_buf_free_tail->next = lrg_buf_cb;
                qdev->lrg_buf_free_tail = lrg_buf_cb;
        }

        if (!lrg_buf_cb->skb) {
                lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
                                                   qdev->lrg_buffer_len);
                if (unlikely(!lrg_buf_cb->skb)) {
                        printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
                               qdev->ndev->name);
                        qdev->lrg_buf_skb_check++;
                } else {
                        /*
                         * We save some space to copy the ethhdr from the
                         * first buffer
                         */
                        skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
                        map = pci_map_single(qdev->pdev,
                                             lrg_buf_cb->skb->data,
                                             qdev->lrg_buffer_len -
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
                        err = pci_dma_mapping_error(qdev->pdev, map);
                        if (err) {
                                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                                       qdev->ndev->name, err);
                                dev_kfree_skb(lrg_buf_cb->skb);
                                lrg_buf_cb->skb = NULL;

                                qdev->lrg_buf_skb_check++;
                                return;
                        }

                        lrg_buf_cb->buf_phy_addr_low =
                            cpu_to_le32(LS_64BITS(map));
                        lrg_buf_cb->buf_phy_addr_high =
                            cpu_to_le32(MS_64BITS(map));
                        pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
                        pci_unmap_len_set(lrg_buf_cb, maplen,
                                          qdev->lrg_buffer_len -
                                          QL_HEADER_SPACE);
                }
        }

        qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
                                                           *qdev)
{
        struct ql_rcv_buf_cb *lrg_buf_cb;

        if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
                if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
                        qdev->lrg_buf_free_tail = NULL;
                qdev->lrg_buf_free_count--;
        }

        return lrg_buf_cb;
}
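
/*
 * Sketch of the large-buffer free list round trip (illustration only,
 * not original driver code).  ql_release_to_lrg_buf_free_list() queues
 * a control block at the tail and, if its skb has been consumed, tries
 * to allocate and DMA-map a fresh one; ql_get_from_lrg_buf_free_list()
 * pops from the head.  lrg_buf_skb_check counts buffers queued without
 * an skb so a later pass can retry the allocation.
 */
static void ql_lrg_buf_recycle_sketch(struct ql3_adapter *qdev)
{
        struct ql_rcv_buf_cb *lrg_buf_cb;

        lrg_buf_cb = ql_get_from_lrg_buf_free_list(qdev);
        if (lrg_buf_cb == NULL)
                return;         /* free list is empty */

        /* ... hand lrg_buf_cb->skb to the hardware or the stack ... */

        ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb);
}
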
static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
                            unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
        int i;
        u32 mask;
        u32 dataBit;
        u32 previousBit;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        /* Clock in a zero, then do the start bit */
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                           AUBURN_EEPROM_DO_1);
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                           AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                           AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

        mask = 1 << (FM93C56A_CMD_BITS - 1);
        /* Force the previous data bit to be different */
        previousBit = 0xffff;
        for (i = 0; i < FM93C56A_CMD_BITS; i++) {
                dataBit =
                    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
                if (previousBit != dataBit) {
                        /*
                         * If the bit changed, then change the DO state to
                         * match
                         */
                        ql_write_nvram_reg(qdev,
                                           &port_regs->CommonRegs.
                                           serialPortInterfaceReg,
                                           ISP_NVRAM_MASK |
                                           qdev->eeprom_cmd_data | dataBit);
                        previousBit = dataBit;
                }
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.
                                   serialPortInterfaceReg,
                                   ISP_NVRAM_MASK |
                                   qdev->eeprom_cmd_data | dataBit |
                                   AUBURN_EEPROM_CLK_RISE);
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.
                                   serialPortInterfaceReg,
                                   ISP_NVRAM_MASK |
                                   qdev->eeprom_cmd_data | dataBit |
                                   AUBURN_EEPROM_CLK_FALL);
                cmd = cmd << 1;
        }

        mask = 1 << (addrBits - 1);
        /* Force the previous data bit to be different */
        previousBit = 0xffff;
        for (i = 0; i < addrBits; i++) {
                dataBit =
                    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
                    AUBURN_EEPROM_DO_0;
                if (previousBit != dataBit) {
                        /*
                         * If the bit changed, then change the DO state to
                         * match
                         */
                        ql_write_nvram_reg(qdev,
                                           &port_regs->CommonRegs.
                                           serialPortInterfaceReg,
                                           ISP_NVRAM_MASK |
                                           qdev->eeprom_cmd_data | dataBit);
                        previousBit = dataBit;
                }
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.
                                   serialPortInterfaceReg,
                                   ISP_NVRAM_MASK |
                                   qdev->eeprom_cmd_data | dataBit |
                                   AUBURN_EEPROM_CLK_RISE);
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.
                                   serialPortInterfaceReg,
                                   ISP_NVRAM_MASK |
                                   qdev->eeprom_cmd_data | dataBit |
                                   AUBURN_EEPROM_CLK_FALL);
                eepromAddr = eepromAddr << 1;
        }
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
        int i;
        u32 data = 0;
        u32 dataBit;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        /* Read the data bits */
        /* The first bit is a dummy.  Clock right over it. */
        for (i = 0; i < dataBits; i++) {
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.
                                   serialPortInterfaceReg,
                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                   AUBURN_EEPROM_CLK_RISE);
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.
                                   serialPortInterfaceReg,
                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                   AUBURN_EEPROM_CLK_FALL);
                dataBit =
                    (ql_read_common_reg
                     (qdev,
                      &port_regs->CommonRegs.
                      serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
                data = (data << 1) | dataBit;
        }
        *value = (u16) data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
                            u32 eepromAddr, unsigned short *value)
{
        fm93c56a_select(qdev);
        fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
        fm93c56a_datain(qdev, value);
        fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
        __le16 *p = (__le16 *)ndev->dev_addr;
        p[0] = cpu_to_le16(addr[0]);
        p[1] = cpu_to_le16(addr[1]);
        p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
        u16 *pEEPROMData;
        u16 checksum = 0;
        u32 index;
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);

        pEEPROMData = (u16 *)&qdev->nvram_data;
        qdev->eeprom_cmd_data = 0;
        if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
                            (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                             2) << 10)) {
                printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
                       __func__);
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return -1;
        }

        for (index = 0; index < EEPROM_SIZE; index++) {
                eeprom_readword(qdev, index, pEEPROMData);
                checksum += *pEEPROMData;
                pEEPROMData++;
        }
        ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

        if (checksum != 0) {
                printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
                       qdev->ndev->name, checksum);
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return -1;
        }

        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return checksum;
}
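
/*
 * Worked example of the NVRAM checksum rule enforced above (not
 * original driver code): the 16-bit sum of all EEPROM_SIZE words,
 * including the stored checksum word, must wrap to zero.  So whatever
 * the payload words sum to, the final word is that sum's two's
 * complement.  A sketch of how such a checksum word would be computed:
 */
static u16 ql_nvram_checksum_word_sketch(const u16 *data, int words)
{
        u16 sum = 0;
        int i;

        /* Sum every payload word except the checksum slot itself. */
        for (i = 0; i < words - 1; i++)
                sum += data[i];

        /* Choose the final word so the 16-bit total wraps to zero. */
        return (u16)(0 - sum);
}
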
static const u32 PHYAddr[2] = {
        PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 temp;
        int count = 1000;

        while (count) {
                temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
                if (!(temp & MAC_MII_STATUS_BSY))
                        return 0;
                udelay(10);
                count--;
        }
        return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 scanControl;

        if (qdev->numPorts > 1) {
                /* Auto scan will cycle through multiple ports */
                scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
        } else {
                scanControl = MAC_MII_CONTROL_SC;
        }

        /*
         * Scan register 1 of the PHY/PETBI; set up to scan both devices.
         * The autoscan starts from the first register and completes the
         * last one before rolling over to the first.
         */
        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           PHYAddr[0] | MII_SCAN_REGISTER);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (scanControl) |
                           ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
        u8 ret;
        struct ql3xxx_port_registers __iomem *port_regs =
                                        qdev->mem_map_registers;

        /* See if scan mode is enabled before we turn it off */
        if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
            (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
                /* Scan is enabled */
                ret = 1;
        } else {
                /* Scan is disabled */
                ret = 0;
        }

        /*
         * When disabling scan mode you must first change the MII register
         * address
         */
        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           PHYAddr[0] | MII_SCAN_REGISTER);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
                             MAC_MII_CONTROL_RC) << 16));

        return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
                               u16 regAddr, u16 value, u32 phyAddr)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u8 scanWasEnabled;

        scanWasEnabled = ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           phyAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

        /* Wait for the write to complete. */
        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free after issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        if (scanWasEnabled)
                ql_mii_enable_scan_mode(qdev);

        return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
                              u16 *value, u32 phyAddr)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u8 scanWasEnabled;
        u32 temp;

        scanWasEnabled = ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           phyAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16));

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

        /* Wait for the read to complete */
        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free after issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
        *value = (u16) temp;

        if (scanWasEnabled)
                ql_mii_enable_scan_mode(qdev);

        return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           qdev->PHYAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

        /* Wait for the write to complete. */
        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free after issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        ql_mii_enable_scan_mode(qdev);

        return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
        u32 temp;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           qdev->PHYAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16));

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

        /* Wait for the read to complete */
        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free after issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
        *value = (u16) temp;

        ql_mii_enable_scan_mode(qdev);

        return 0;
}
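
/*
 * Usage sketch for the MII accessors above (illustration only, not
 * original driver code).  The *_ex variants take an explicit PHY
 * address while the plain variants use qdev->PHYAddr; all of them park
 * the auto-scan before touching macMIIMgmtAddrReg and restore it
 * afterwards.  Reading the two PHY ID registers, as PHY_Setup() does
 * later in this file:
 */
static int ql_read_phy_id_sketch(struct ql3_adapter *qdev,
                                 u16 *id0, u16 *id1)
{
        if (ql_mii_read_reg(qdev, PHY_ID_0_REG, id0) < 0)
                return -1;      /* management port error */
        if (ql_mii_read_reg(qdev, PHY_ID_1_REG, id1) < 0)
                return -1;
        return 0;
}
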
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
        ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
        u16 reg;

        /* Enable Auto-negotiation sense */
        ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
        reg |= PETBI_TBI_AUTO_SENSE;
        ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

        ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
                         PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

        ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
                         PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
                         PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
        ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
                            PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
        u16 reg;

        /* Enable Auto-negotiation sense */
        ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
                           PHYAddr[qdev->mac_index]);
        reg |= PETBI_TBI_AUTO_SENSE;
        ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
                            PHYAddr[qdev->mac_index]);

        ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
                            PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
                            PHYAddr[qdev->mac_index]);

        ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
                            PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
                            PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
                            PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
        ql_petbi_reset(qdev);
        ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
        ql_petbi_reset_ex(qdev);
        ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
        u16 reg;

        if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
                return 0;

        return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
        printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name);
        /* power down device bit 11 = 1 */
        ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
        /* enable diagnostic mode bit 2 = 1 */
        ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
        /* 1000MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
        ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
        /* 100MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
        ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
        /* 10MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
        ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
        /* point to hidden reg 0x2806 */
        ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
        /* Write new PHYAD w/bit 5 set */
        ql_mii_write_reg_ex(qdev, 0x11,
                            0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
        /*
         * Disable diagnostic mode bit 2 = 0
         * Power up device bit 11 = 0
         * Link up (on) and activity (blink)
         */
        ql_mii_write_reg(qdev, 0x12, 0x840a);
        ql_mii_write_reg(qdev, 0x00, 0x1140);
        ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static PHY_DEVICE_et getPhyType(struct ql3_adapter *qdev,
                                u16 phyIdReg0, u16 phyIdReg1)
{
        PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
        u32 oui;
        u16 model;
        int i;

        if (phyIdReg0 == 0xffff)
                return result;

        if (phyIdReg1 == 0xffff)
                return result;

        /* oui is split between two registers */
        oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

        model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

        /* Scan table for this PHY */
        for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
                if ((oui == PHY_DEVICES[i].phyIdOUI) &&
                    (model == PHY_DEVICES[i].phyIdModel)) {
                        result = PHY_DEVICES[i].phyDevice;

                        printk(KERN_INFO "%s: Phy: %s\n",
                               qdev->ndev->name, PHY_DEVICES[i].name);

                        break;
                }
        }

        return result;
}
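
/*
 * Worked example of the OUI/model decode above (illustration only).
 * The OUI bits compared against PHY_DEVICES[] are assembled from all
 * 16 bits of ID register 0 plus the top six bits of ID register 1
 * (selected by PHY_OUI_1_MASK), while the model number sits just
 * below them:
 *
 *   oui   = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
 *   model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
 *
 * e.g. a Vitesse VSC8211 must decode to oui == 0x0003f1 and
 * model == 0xb to match its PHY_DEVICES[] entry.  The concrete
 * register values that produce those results depend on the mask
 * definitions in qla3xxx.h, so the arithmetic is shown symbolically.
 */
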
static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
        u16 reg;

        switch (qdev->phyType) {
        case PHY_AGERE_ET1011C:
                if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
                        return 0;

                reg = (reg >> 8) & 3;
                break;
        default:
                if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
                        return 0;

                reg = (((reg & 0x18) >> 3) & 3);
        }

        switch (reg) {
        case 2:
                return SPEED_1000;
        case 1:
                return SPEED_100;
        case 0:
                return SPEED_10;
        default:
                return -1;
        }
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
        u16 reg;

        switch (qdev->phyType) {
        case PHY_AGERE_ET1011C:
                if (ql_mii_read_reg(qdev, 0x1A, &reg))
                        return 0;

                return ((reg & 0x0080) && (reg & 0x1000)) != 0;
        case PHY_VITESSE_VSC8211:
        default:
                if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
                        return 0;
                return (reg & PHY_AUX_DUPLEX_STAT) != 0;
        }
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
        u16 reg;

        if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
                return 0;

        return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
        u16 reg1;
        u16 reg2;
        bool agereAddrChangeNeeded = false;
        u32 miiAddr = 0;
        int err;

        /* Determine the PHY we are using by reading the IDs */
        err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
        if (err != 0) {
                printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
                       qdev->ndev->name);
                return err;
        }

        err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
        if (err != 0) {
                printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG\n",
                       qdev->ndev->name);
                return err;
        }

        /* Check if we have an Agere PHY */
        if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

                /* Determine which MII address we should be using,
                   based on the index of the card */
                if (qdev->mac_index == 0)
                        miiAddr = MII_AGERE_ADDR_1;
                else
                        miiAddr = MII_AGERE_ADDR_2;

                err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
                if (err != 0) {
                        printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
                               qdev->ndev->name);
                        return err;
                }

                err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
                if (err != 0) {
                        printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG after Agere detected\n",
                               qdev->ndev->name);
                        return err;
                }

                /* We need to remember to initialize the Agere PHY */
                agereAddrChangeNeeded = true;
        }

        /* Determine the particular PHY we have on board to apply
           PHY specific initializations */
        qdev->phyType = getPhyType(qdev, reg1, reg2);

        if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
                /* need this here so address gets changed */
                phyAgereSpecificInit(qdev, miiAddr);
        } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
                printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
                return -EIO;
        }

        return 0;
}
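
/*
 * Sketch of the presence test PHY_Setup() relies on (illustration
 * only, not original driver code): an MII read from an address with
 * no PHY behind it returns all ones, so 0xffff in either ID register
 * means "nothing here" and triggers the fallback to the Agere MII
 * addresses.  The helper name is hypothetical.
 */
static int ql_phy_present_sketch(struct ql3_adapter *qdev, u32 miiAddr)
{
        u16 reg;

        if (ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg, miiAddr) != 0)
                return 0;       /* management port error */

        return reg != 0xffff;   /* all ones => no PHY at this address */
}
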
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
        else
                value = (MAC_CONFIG_REG_PE << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
        else
                value = (MAC_CONFIG_REG_SR << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
        else
                value = (MAC_CONFIG_REG_GM << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
        else
                value = (MAC_CONFIG_REG_FD << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value =
                    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
                     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
        else
                value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_SM0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_SM1;
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
        u16 reg;

        ql_mii_read_reg(qdev, 0x00, &reg);
        return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_AC0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_AC1;
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck) {
                if (netif_msg_link(qdev))
                        printk(KERN_INFO PFX
                               "%s: Auto-Negotiate complete.\n",
                               qdev->ndev->name);
                return 1;
        } else {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Auto-Negotiate incomplete.\n",
                               qdev->ndev->name);
                return 0;
        }
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
        if (ql_is_fiber(qdev))
                return ql_is_petbi_neg_pause(qdev);
        else
                return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_AE0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_AE1;
                break;
        }
        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
        if (ql_is_fiber(qdev))
                return SPEED_1000;
        else
                return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
        if (ql_is_fiber(qdev))
                return 1;
        else
                return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = ISP_CONTROL_LINK_DN_0;
                break;
        case 1:
                bitToCheck = ISP_CONTROL_LINK_DN_1;
                break;
        }

        temp = ql_read_common_reg(qdev,
                                  &port_regs->CommonRegs.ispControlStatus);
        return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        switch (qdev->mac_index) {
        case 0:
                ql_write_common_reg(qdev,
                                    &port_regs->CommonRegs.ispControlStatus,
                                    (ISP_CONTROL_LINK_DN_0) |
                                    (ISP_CONTROL_LINK_DN_0 << 16));
                break;

        case 1:
                ql_write_common_reg(qdev,
                                    &port_regs->CommonRegs.ispControlStatus,
                                    (ISP_CONTROL_LINK_DN_1) |
                                    (ISP_CONTROL_LINK_DN_1 << 16));
                break;

        default:
                return 1;
        }

        return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_F1_ENABLED;
                break;
        case 1:
                bitToCheck = PORT_STATUS_F3_ENABLED;
                break;
        default:
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck) {
                if (netif_msg_link(qdev))
                        printk(KERN_DEBUG PFX
                               "%s: is not link master.\n", qdev->ndev->name);
                return 0;
        } else {
                if (netif_msg_link(qdev))
                        printk(KERN_DEBUG PFX
                               "%s: is link master.\n", qdev->ndev->name);
                return 1;
        }
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
        ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
                            PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
        u16 reg;
        u16 portConfiguration;

        if (qdev->phyType == PHY_AGERE_ET1011C) {
                /* turn off external loopback */
                ql_mii_write_reg(qdev, 0x13, 0x0000);
        }

        if (qdev->mac_index == 0)
                portConfiguration =
                        qdev->nvram_data.macCfg_port0.portConfiguration;
        else
                portConfiguration =
                        qdev->nvram_data.macCfg_port1.portConfiguration;

        /* Some HBAs in the field are set to 0 and they need to
           be reinterpreted with a default value */
        if (portConfiguration == 0)
                portConfiguration = PORT_CONFIG_DEFAULT;

        /* Set the 1000 advertisements */
        ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
                           PHYAddr[qdev->mac_index]);
        reg &= ~PHY_GIG_ALL_PARAMS;

        if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
                if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
                        reg |= PHY_GIG_ADV_1000F;
                else
                        reg |= PHY_GIG_ADV_1000H;
        }

        ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
                            PHYAddr[qdev->mac_index]);

        /* Set the 10/100 & pause negotiation advertisements */
        ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
                           PHYAddr[qdev->mac_index]);
        reg &= ~PHY_NEG_ALL_PARAMS;

        if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
                reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

        if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
                if (portConfiguration & PORT_CONFIG_100MB_SPEED)
                        reg |= PHY_NEG_ADV_100F;

                if (portConfiguration & PORT_CONFIG_10MB_SPEED)
                        reg |= PHY_NEG_ADV_10F;
        }

        if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
                if (portConfiguration & PORT_CONFIG_100MB_SPEED)
                        reg |= PHY_NEG_ADV_100H;

                if (portConfiguration & PORT_CONFIG_10MB_SPEED)
                        reg |= PHY_NEG_ADV_10H;
        }

        if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
                reg |= 1;

        ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
                            PHYAddr[qdev->mac_index]);

        ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

        ql_mii_write_reg_ex(qdev, CONTROL_REG,
                            reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
                            PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
        ql_phy_reset_ex(qdev);
        PHY_Setup(qdev);
        ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp, linkState;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_UP0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_UP1;
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck)
                linkState = LS_UP;
        else
                linkState = LS_DOWN;

        return linkState;
}
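
/*
 * Usage sketch for ql_get_link_state() (illustration only, not
 * original driver code).  The function reads portStatus, a page 0
 * register, so callers must hold hw_lock, just as the link state
 * machine work handler below does before comparing the result with
 * qdev->port_link_state.
 */
static u32 ql_sample_link_state_sketch(struct ql3_adapter *qdev)
{
        unsigned long hw_flags;
        u32 state;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        state = ql_get_link_state(qdev);        /* LS_UP or LS_DOWN */
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

        return state;
}
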
1522static int ql_port_start(struct ql3_adapter *qdev)
1523{
1524        if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1525                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1526                         2) << 7)) {
1527                printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
1528                       qdev->ndev->name);
1529                return -1;
1530        }
1531
1532        if (ql_is_fiber(qdev)) {
1533                ql_petbi_init(qdev);
1534        } else {
1535                /* Copper port */
1536                ql_phy_init_ex(qdev);
1537        }
1538
1539        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1540        return 0;
1541}
1542
1543static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1544{
1545
1546        if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1547                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1548                         2) << 7))
1549                return -1;
1550
1551        if (!ql_auto_neg_error(qdev)) {
1552                if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1553                        /* configure the MAC */
1554                        if (netif_msg_link(qdev))
1555                                printk(KERN_DEBUG PFX
1556                                       "%s: Configuring link.\n",
1557                                       qdev->ndev->
1558                                       name);
1559                        ql_mac_cfg_soft_reset(qdev, 1);
1560                        ql_mac_cfg_gig(qdev,
1561                                       (ql_get_link_speed
1562                                        (qdev) ==
1563                                        SPEED_1000));
1564                        ql_mac_cfg_full_dup(qdev,
1565                                            ql_is_link_full_dup
1566                                            (qdev));
1567                        ql_mac_cfg_pause(qdev,
1568                                         ql_is_neg_pause
1569                                         (qdev));
1570                        ql_mac_cfg_soft_reset(qdev, 0);
1571
1572                        /* enable the MAC */
1573                        if (netif_msg_link(qdev))
1574                                printk(KERN_DEBUG PFX
1575                                       "%s: Enabling mac.\n",
1576                                       qdev->ndev->
1577                                               name);
1578                        ql_mac_enable(qdev, 1);
1579                }
1580
1581                qdev->port_link_state = LS_UP;
1582                netif_start_queue(qdev->ndev);
1583                netif_carrier_on(qdev->ndev);
1584                if (netif_msg_link(qdev))
1585                        printk(KERN_INFO PFX
1586                               "%s: Link is up at %d Mbps, %s duplex.\n",
1587                               qdev->ndev->name,
1588                               ql_get_link_speed(qdev),
1589                               ql_is_link_full_dup(qdev)
1590                               ? "full" : "half");
1591
1592        } else {        /* Remote error detected */
1593
1594                if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1595                        if (netif_msg_link(qdev))
1596                                printk(KERN_DEBUG PFX
1597                                       "%s: Remote error detected. "
1598                                       "Calling ql_port_start().\n",
1599                                       qdev->ndev->
1600                                       name);
1601                        /*
1602                         * ql_port_start() is shared code and needs
1603                         * to lock the PHY on its own.
1604                         */
1605                        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1606                        if(ql_port_start(qdev)) {/* Restart port */
1607                                return -1;
1608                        } else
1609                                return 0;
1610                }
1611        }
1612        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1613        return 0;
1614}
1615
1616static void ql_link_state_machine_work(struct work_struct *work)
1617{
1618        struct ql3_adapter *qdev =
1619                container_of(work, struct ql3_adapter, link_state_work.work);
1620
1621        u32 curr_link_state;
1622        unsigned long hw_flags;
1623
1624        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1625
1626        curr_link_state = ql_get_link_state(qdev);
1627
1628        if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
1629                if (netif_msg_link(qdev))
1630                        printk(KERN_INFO PFX
1631                               "%s: Reset in progress, skip processing link "
1632                               "state.\n", qdev->ndev->name);
1633
1634                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1635
1636                /* Restart timer on 1 second interval. */
1637                mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1638
1639                return;
1640        }
1641
1642        switch (qdev->port_link_state) {
1643        default:
1644                if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1645                        ql_port_start(qdev);
1646                }
1647                qdev->port_link_state = LS_DOWN;
1648                /* Fall Through */
1649
1650        case LS_DOWN:
1651                if (curr_link_state == LS_UP) {
1652                        if (netif_msg_link(qdev))
1653                                printk(KERN_INFO PFX "%s: Link is up.\n",
1654                                       qdev->ndev->name);
1655                        if (ql_is_auto_neg_complete(qdev))
1656                                ql_finish_auto_neg(qdev);
1657
1658                        if (qdev->port_link_state == LS_UP)
1659                                ql_link_down_detect_clear(qdev);
1660
1661                        qdev->port_link_state = LS_UP;
1662                }
1663                break;
1664
1665        case LS_UP:
1666                /*
1667                 * See if the link is currently down or went down and came
1668                 * back up
1669                 */
1670                if (curr_link_state == LS_DOWN) {
1671                        if (netif_msg_link(qdev))
1672                                printk(KERN_INFO PFX "%s: Link is down.\n",
1673                                       qdev->ndev->name);
1674                        qdev->port_link_state = LS_DOWN;
1675                }
1676                if (ql_link_down_detect(qdev))
1677                        qdev->port_link_state = LS_DOWN;
1678                break;
1679        }
1680        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1681
1682        /* Restart timer on 1 second interval. */
1683        mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1684}
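
    /*
     * Summary of the transitions handled above:
     *
     *   (unknown state) -> LS_DOWN   after ql_port_start() on the link master
     *   LS_DOWN -> LS_UP             when the port reports up and
     *                                auto-negotiation has completed
     *   LS_UP -> LS_DOWN             when the port reports down or a
     *                                link-down event is latched
     *
     * The work item re-arms adapter_timer on every pass, so this state
     * machine runs roughly once a second while the interface is up.
     */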
1685
1686/*
1687 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1688 */
1689static void ql_get_phy_owner(struct ql3_adapter *qdev)
1690{
1691        if (ql_this_adapter_controls_port(qdev))
1692                set_bit(QL_LINK_MASTER,&qdev->flags);
1693        else
1694                clear_bit(QL_LINK_MASTER,&qdev->flags);
1695}
1696
1697/*
1698 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1699 */
1700static void ql_init_scan_mode(struct ql3_adapter *qdev)
1701{
1702        ql_mii_enable_scan_mode(qdev);
1703
1704        if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1705                if (ql_this_adapter_controls_port(qdev))
1706                        ql_petbi_init_ex(qdev);
1707        } else {
1708                if (ql_this_adapter_controls_port(qdev))
1709                        ql_phy_init_ex(qdev);
1710        }
1711}
1712
1713/*
1714 * MII_Setup needs to be called before taking the PHY out of reset so that the
1715 * management interface clock speed can be set properly.  It would be better if
1716 * we had a way to disable MDC until after the PHY is out of reset, but we
1717 * don't have that capability.
1718 */
1719static int ql_mii_setup(struct ql3_adapter *qdev)
1720{
1721        u32 reg;
1722        struct ql3xxx_port_registers __iomem *port_regs =
1723                        qdev->mem_map_registers;
1724
1725        if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1726                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1727                         2) << 7))
1728                return -1;
1729
1730        if (qdev->device_id == QL3032_DEVICE_ID)
1731                ql_write_page0_reg(qdev,
1732                        &port_regs->macMIIMgmtControlReg, 0x0f00000);
1733
1734        /* Divide 125MHz clock by 28 to meet PHY timing requirements */
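            /* (125 MHz / 28 is roughly 4.46 MHz on the MDC line.) */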
1735        reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1736
1737        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1738                           reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1739
1740        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1741        return 0;
1742}
1743
1744static u32 ql_supported_modes(struct ql3_adapter *qdev)
1745{
1746        u32 supported;
1747
1748        if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1749                supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1750                    | SUPPORTED_Autoneg;
1751        } else {
1752                supported = SUPPORTED_10baseT_Half
1753                    | SUPPORTED_10baseT_Full
1754                    | SUPPORTED_100baseT_Half
1755                    | SUPPORTED_100baseT_Full
1756                    | SUPPORTED_1000baseT_Half
1757                    | SUPPORTED_1000baseT_Full
1758                    | SUPPORTED_Autoneg | SUPPORTED_TP;
1759        }
1760
1761        return supported;
1762}
1763
1764static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1765{
1766        int status;
1767        unsigned long hw_flags;
1768        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1769        if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1770                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1771                         2) << 7)) {
1772                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1773                return 0;
1774        }
1775        status = ql_is_auto_cfg(qdev);
1776        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1777        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1778        return status;
1779}
1780
1781static u32 ql_get_speed(struct ql3_adapter *qdev)
1782{
1783        u32 status;
1784        unsigned long hw_flags;
1785        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1786        if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1787                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1788                         2) << 7)) {
1789                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1790                return 0;
1791        }
1792        status = ql_get_link_speed(qdev);
1793        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1794        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1795        return status;
1796}
1797
1798static int ql_get_full_dup(struct ql3_adapter *qdev)
1799{
1800        int status;
1801        unsigned long hw_flags;
1802        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1803        if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1804                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1805                         2) << 7)) {
1806                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1807                return 0;
1808        }
1809        status = ql_is_link_full_dup(qdev);
1810        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1811        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1812        return status;
1813}
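
    /*
     * ql_get_auto_cfg_status(), ql_get_speed() and ql_get_full_dup()
     * above all repeat the same take-the-PHY-GIO-semaphore step.  A
     * minimal sketch of how that step could be factored out;
     * ql_take_phy_gio_sem() is a hypothetical helper, not part of this
     * driver:
     */
    static inline int ql_take_phy_gio_sem(struct ql3_adapter *qdev)
    {
            return ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                                   (QL_RESOURCE_BITS_BASE_CODE |
                                    (qdev->mac_index) * 2) << 7);
    }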
1814
1815
1816static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1817{
1818        struct ql3_adapter *qdev = netdev_priv(ndev);
1819
1820        ecmd->transceiver = XCVR_INTERNAL;
1821        ecmd->supported = ql_supported_modes(qdev);
1822
1823        if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1824                ecmd->port = PORT_FIBRE;
1825        } else {
1826                ecmd->port = PORT_TP;
1827                ecmd->phy_address = qdev->PHYAddr;
1828        }
1829        ecmd->advertising = ql_supported_modes(qdev);
1830        ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1831        ecmd->speed = ql_get_speed(qdev);
1832        ecmd->duplex = ql_get_full_dup(qdev);
1833        return 0;
1834}
1835
1836static void ql_get_drvinfo(struct net_device *ndev,
1837                           struct ethtool_drvinfo *drvinfo)
1838{
1839        struct ql3_adapter *qdev = netdev_priv(ndev);
1840        strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
1841        strncpy(drvinfo->version, ql3xxx_driver_version, 32);
1842        strncpy(drvinfo->fw_version, "N/A", 32);
1843        strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
1844        drvinfo->regdump_len = 0;
1845        drvinfo->eedump_len = 0;
1846}
1847
1848static u32 ql_get_msglevel(struct net_device *ndev)
1849{
1850        struct ql3_adapter *qdev = netdev_priv(ndev);
1851        return qdev->msg_enable;
1852}
1853
1854static void ql_set_msglevel(struct net_device *ndev, u32 value)
1855{
1856        struct ql3_adapter *qdev = netdev_priv(ndev);
1857        qdev->msg_enable = value;
1858}
1859
1860static void ql_get_pauseparam(struct net_device *ndev,
1861                              struct ethtool_pauseparam *pause)
1862{
1863        struct ql3_adapter *qdev = netdev_priv(ndev);
1864        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1865
1866        u32 reg;
1867        if(qdev->mac_index == 0)
1868                reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1869        else
1870                reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1871
1872        pause->autoneg  = ql_get_auto_cfg_status(qdev);
1873        pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
1874        pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
1875}
1876
1877static const struct ethtool_ops ql3xxx_ethtool_ops = {
1878        .get_settings = ql_get_settings,
1879        .get_drvinfo = ql_get_drvinfo,
1880        .get_link = ethtool_op_get_link,
1881        .get_msglevel = ql_get_msglevel,
1882        .set_msglevel = ql_set_msglevel,
1883        .get_pauseparam = ql_get_pauseparam,
1884};
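
    /*
     * These ops back the standard ethtool requests, e.g. (assuming the
     * interface is named eth0):
     *
     *   ethtool eth0              -> ql_get_settings()
     *   ethtool -i eth0           -> ql_get_drvinfo()
     *   ethtool -a eth0           -> ql_get_pauseparam()
     *   ethtool -s eth0 msglvl N  -> ql_set_msglevel()
     */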
1885
1886static int ql_populate_free_queue(struct ql3_adapter *qdev)
1887{
1888        struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1889        dma_addr_t map;
1890        int err;
1891
1892        while (lrg_buf_cb) {
1893                if (!lrg_buf_cb->skb) {
1894                        lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
1895                                                           qdev->lrg_buffer_len);
1896                        if (unlikely(!lrg_buf_cb->skb)) {
1897                                printk(KERN_DEBUG PFX
1898                                       "%s: Failed netdev_alloc_skb().\n",
1899                                       qdev->ndev->name);
1900                                break;
1901                        } else {
1902                                /*
1903                                 * We save some space to copy the ethhdr from
1904                                 * first buffer
1905                                 */
1906                                skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1907                                map = pci_map_single(qdev->pdev,
1908                                                     lrg_buf_cb->skb->data,
1909                                                     qdev->lrg_buffer_len -
1910                                                     QL_HEADER_SPACE,
1911                                                     PCI_DMA_FROMDEVICE);
1912
1913                                err = pci_dma_mapping_error(qdev->pdev, map);
1914                                if(err) {
1915                                        printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
1916                                               qdev->ndev->name, err);
1917                                        dev_kfree_skb(lrg_buf_cb->skb);
1918                                        lrg_buf_cb->skb = NULL;
1919                                        break;
1920                                }
1921
1922
1923                                lrg_buf_cb->buf_phy_addr_low =
1924                                    cpu_to_le32(LS_64BITS(map));
1925                                lrg_buf_cb->buf_phy_addr_high =
1926                                    cpu_to_le32(MS_64BITS(map));
1927                                pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1928                                pci_unmap_len_set(lrg_buf_cb, maplen,
1929                                                  qdev->lrg_buffer_len -
1930                                                  QL_HEADER_SPACE);
1931                                --qdev->lrg_buf_skb_check;
1932                                if (!qdev->lrg_buf_skb_check)
1933                                        return 1;
1934                        }
1935                }
1936                lrg_buf_cb = lrg_buf_cb->next;
1937        }
1938        return 0;
1939}
1940
1941/*
1942 * Caller holds hw_lock.
1943 */
1944static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1945{
1946        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1947        if (qdev->small_buf_release_cnt >= 16) {
1948                while (qdev->small_buf_release_cnt >= 16) {
1949                        qdev->small_buf_q_producer_index++;
1950
1951                        if (qdev->small_buf_q_producer_index ==
1952                            NUM_SBUFQ_ENTRIES)
1953                                qdev->small_buf_q_producer_index = 0;
1954                        qdev->small_buf_release_cnt -= 8;
1955                }
1956                wmb();
1957                writel(qdev->small_buf_q_producer_index,
1958                        &port_regs->CommonRegs.rxSmallQProducerIndex);
1959        }
1960}
1961
1962/*
1963 * Caller holds hw_lock.
1964 */
1965static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1966{
1967        struct bufq_addr_element *lrg_buf_q_ele;
1968        int i;
1969        struct ql_rcv_buf_cb *lrg_buf_cb;
1970        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1971
1972        if ((qdev->lrg_buf_free_count >= 8)
1973            && (qdev->lrg_buf_release_cnt >= 16)) {
1974
1975                if (qdev->lrg_buf_skb_check)
1976                        if (!ql_populate_free_queue(qdev))
1977                                return;
1978
1979                lrg_buf_q_ele = qdev->lrg_buf_next_free;
1980
1981                while ((qdev->lrg_buf_release_cnt >= 16)
1982                       && (qdev->lrg_buf_free_count >= 8)) {
1983
1984                        for (i = 0; i < 8; i++) {
1985                                lrg_buf_cb =
1986                                    ql_get_from_lrg_buf_free_list(qdev);
1987                                lrg_buf_q_ele->addr_high =
1988                                    lrg_buf_cb->buf_phy_addr_high;
1989                                lrg_buf_q_ele->addr_low =
1990                                    lrg_buf_cb->buf_phy_addr_low;
1991                                lrg_buf_q_ele++;
1992
1993                                qdev->lrg_buf_release_cnt--;
1994                        }
1995
1996                        qdev->lrg_buf_q_producer_index++;
1997
1998                        if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
1999                                qdev->lrg_buf_q_producer_index = 0;
2000
2001                        if (qdev->lrg_buf_q_producer_index ==
2002                            (qdev->num_lbufq_entries - 1)) {
2003                                lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
2004                        }
2005                }
2006                wmb();
2007                qdev->lrg_buf_next_free = lrg_buf_q_ele;
2008                writel(qdev->lrg_buf_q_producer_index,
2009                        &port_regs->CommonRegs.rxLargeQProducerIndex);
2010        }
2011}
2012
2013static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
2014                                   struct ob_mac_iocb_rsp *mac_rsp)
2015{
2016        struct ql_tx_buf_cb *tx_cb;
2017        int i;
2018        int retval = 0;
2019
2020        if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2021                printk(KERN_WARNING "Frame short, but it was padded and sent.\n");
2022        }
2023
2024        tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
2025
2026        /*  Check the transmit response flags for any errors */
2027        if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2028                printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
2029
2030                qdev->ndev->stats.tx_errors++;
2031                retval = -EIO;
2032                goto frame_not_sent;
2033        }
2034
2035        if(tx_cb->seg_count == 0) {
2036                printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
2037
2038                qdev->ndev->stats.tx_errors++;
2039                retval = -EIO;
2040                goto invalid_seg_count;
2041        }
2042
2043        pci_unmap_single(qdev->pdev,
2044                         pci_unmap_addr(&tx_cb->map[0], mapaddr),
2045                         pci_unmap_len(&tx_cb->map[0], maplen),
2046                         PCI_DMA_TODEVICE);
2047        tx_cb->seg_count--;
2048        if (tx_cb->seg_count) {
2049                for (i = 1; i < tx_cb->seg_count; i++) {
2050                        pci_unmap_page(qdev->pdev,
2051                                       pci_unmap_addr(&tx_cb->map[i],
2052                                                      mapaddr),
2053                                       pci_unmap_len(&tx_cb->map[i], maplen),
2054                                       PCI_DMA_TODEVICE);
2055                }
2056        }
2057        qdev->ndev->stats.tx_packets++;
2058        qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
2059
2060frame_not_sent:
2061        dev_kfree_skb_irq(tx_cb->skb);
2062        tx_cb->skb = NULL;
2063
2064invalid_seg_count:
2065        atomic_inc(&qdev->tx_count);
2066}
2067
2068static void ql_get_sbuf(struct ql3_adapter *qdev)
2069{
2070        if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
2071                qdev->small_buf_index = 0;
2072        qdev->small_buf_release_cnt++;
2073}
2074
2075static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
2076{
2077        struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
2078        lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
2079        qdev->lrg_buf_release_cnt++;
2080        if (++qdev->lrg_buf_index == qdev->num_large_buffers)
2081                qdev->lrg_buf_index = 0;
2082        return lrg_buf_cb;
2083}
2084
2085/*
2086 * The difference between 3022 and 3032 for inbound completions:
2087 * 3022 uses two buffers per completion.  The first buffer contains
2088 * (some) header info, the second the remainder of the headers plus
2089 * the data.  For this chip we reserve some space at the top of the
2090 * receive buffer so that the header info in buffer one can be
2091 * prepended to buffer two.  Buffer two is then sent up while
2092 * buffer one is returned to the hardware to be reused.
2093 * 3032 receives all of its data and headers in one buffer for a
2094 * simpler process.  3032 also supports checksum verification as
2095 * can be seen in ql_process_macip_rx_intr().
2096 */
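
    /*
     * A sketch of the 3022 two-buffer completion described above:
     *
     *   buffer 1: [ header info ]                  (requeued to hardware)
     *   buffer 2: [ QL_HEADER_SPACE | remaining headers + data ]
     *
     * The header info from buffer 1 is prepended into buffer 2's
     * reserved space and only buffer 2 is handed up the stack.
     */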
2097static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2098                                   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
2099{
2100        struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2101        struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2102        struct sk_buff *skb;
2103        u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
2104
2105        /*
2106         * Get the inbound address list (small buffer).
2107         */
2108        ql_get_sbuf(qdev);
2109
2110        if (qdev->device_id == QL3022_DEVICE_ID)
2111                lrg_buf_cb1 = ql_get_lbuf(qdev);
2112
2113        /* start of second buffer */
2114        lrg_buf_cb2 = ql_get_lbuf(qdev);
2115        skb = lrg_buf_cb2->skb;
2116
2117        qdev->ndev->stats.rx_packets++;
2118        qdev->ndev->stats.rx_bytes += length;
2119
2120        skb_put(skb, length);
2121        pci_unmap_single(qdev->pdev,
2122                         pci_unmap_addr(lrg_buf_cb2, mapaddr),
2123                         pci_unmap_len(lrg_buf_cb2, maplen),
2124                         PCI_DMA_FROMDEVICE);
2125        prefetch(skb->data);
2126        skb->ip_summed = CHECKSUM_NONE;
2127        skb->protocol = eth_type_trans(skb, qdev->ndev);
2128
2129        netif_receive_skb(skb);
2130        lrg_buf_cb2->skb = NULL;
2131
2132        if (qdev->device_id == QL3022_DEVICE_ID)
2133                ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2134        ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2135}
2136
2137static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2138                                     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
2139{
2140        struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2141        struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2142        struct sk_buff *skb1 = NULL, *skb2;
2143        struct net_device *ndev = qdev->ndev;
2144        u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
2145        u16 size = 0;
2146
2147        /*
2148         * Get the inbound address list (small buffer).
2149         */
2150
2151        ql_get_sbuf(qdev);
2152
2153        if (qdev->device_id == QL3022_DEVICE_ID) {
2154                /* start of first buffer on 3022 */
2155                lrg_buf_cb1 = ql_get_lbuf(qdev);
2156                skb1 = lrg_buf_cb1->skb;
2157                size = ETH_HLEN;
2158                if (*((u16 *) skb1->data) != 0xFFFF)
2159                        size += VLAN_ETH_HLEN - ETH_HLEN;
2160        }
2161
2162        /* start of second buffer */
2163        lrg_buf_cb2 = ql_get_lbuf(qdev);
2164        skb2 = lrg_buf_cb2->skb;
2165
2166        skb_put(skb2, length);  /* Just the second buffer length here. */
2167        pci_unmap_single(qdev->pdev,
2168                         pci_unmap_addr(lrg_buf_cb2, mapaddr),
2169                         pci_unmap_len(lrg_buf_cb2, maplen),
2170                         PCI_DMA_FROMDEVICE);
2171        prefetch(skb2->data);
2172
2173        skb2->ip_summed = CHECKSUM_NONE;
2174        if (qdev->device_id == QL3022_DEVICE_ID) {
2175                /*
2176                 * Copy the ethhdr from first buffer to second. This
2177                 * is necessary for 3022 IP completions.
2178                 */
2179                skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
2180                                                 skb_push(skb2, size), size);
2181        } else {
2182                u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
2183                if (checksum &
2184                        (IB_IP_IOCB_RSP_3032_ICE |
2185                         IB_IP_IOCB_RSP_3032_CE)) {
2186                        printk(KERN_ERR
2187                               "%s: Bad checksum for this %s packet, checksum = %x.\n",
2188                               __func__,
2189                               ((checksum &
2190                                IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
2191                                "UDP"), checksum);
2192                } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2193                                (checksum & IB_IP_IOCB_RSP_3032_UDP &&
2194                                !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
2195                        skb2->ip_summed = CHECKSUM_UNNECESSARY;
2196                }
2197        }
2198        skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2199
2200        netif_receive_skb(skb2);
2201        ndev->stats.rx_packets++;
2202        ndev->stats.rx_bytes += length;
2203        lrg_buf_cb2->skb = NULL;
2204
2205        if (qdev->device_id == QL3022_DEVICE_ID)
2206                ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2207        ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2208}
2209
2210static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2211                          int *tx_cleaned, int *rx_cleaned, int work_to_do)
2212{
2213        struct net_rsp_iocb *net_rsp;
2214        struct net_device *ndev = qdev->ndev;
2215        int work_done = 0;
2216
2217        /* While there are entries in the completion queue. */
2218        while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2219                qdev->rsp_consumer_index) && (work_done < work_to_do)) {
2220
2221                net_rsp = qdev->rsp_current;
2222                rmb();
2223                /*
2224                 * Fix the 3032 chip's undocumented "feature" where bit-8 is set if the
2225                 * inbound completion is for a VLAN.
2226                 */
2227                if (qdev->device_id == QL3032_DEVICE_ID)
2228                        net_rsp->opcode &= 0x7f;
2229                switch (net_rsp->opcode) {
2230
2231                case OPCODE_OB_MAC_IOCB_FN0:
2232                case OPCODE_OB_MAC_IOCB_FN2:
2233                        ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2234                                               net_rsp);
2235                        (*tx_cleaned)++;
2236                        break;
2237
2238                case OPCODE_IB_MAC_IOCB:
2239                case OPCODE_IB_3032_MAC_IOCB:
2240                        ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2241                                               net_rsp);
2242                        (*rx_cleaned)++;
2243                        break;
2244
2245                case OPCODE_IB_IP_IOCB:
2246                case OPCODE_IB_3032_IP_IOCB:
2247                        ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2248                                                 net_rsp);
2249                        (*rx_cleaned)++;
2250                        break;
2251                default:
2252                        {
2253                                u32 *tmp = (u32 *) net_rsp;
2254                                printk(KERN_ERR PFX
2255                                       "%s: Hit default case, not "
2256                                       "handled!\n"
2257                                       "        dropping the packet, opcode = "
2258                                       "%x.\n",
2259                                       ndev->name, net_rsp->opcode);
2260                                printk(KERN_ERR PFX
2261                                       "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2262                                       (unsigned long int)tmp[0],
2263                                       (unsigned long int)tmp[1],
2264                                       (unsigned long int)tmp[2],
2265                                       (unsigned long int)tmp[3]);
2266                        }
2267                }
2268
2269                qdev->rsp_consumer_index++;
2270
2271                if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2272                        qdev->rsp_consumer_index = 0;
2273                        qdev->rsp_current = qdev->rsp_q_virt_addr;
2274                } else {
2275                        qdev->rsp_current++;
2276                }
2277
2278                work_done = *tx_cleaned + *rx_cleaned;
2279        }
2280
2281        return work_done;
2282}
2283
2284static int ql_poll(struct napi_struct *napi, int budget)
2285{
2286        struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2287        int rx_cleaned = 0, tx_cleaned = 0;
2288        unsigned long hw_flags;
2289        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2290
2291        ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2292
2293        if (tx_cleaned + rx_cleaned != budget) {
2294                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2295                __napi_complete(napi);
2296                ql_update_small_bufq_prod_index(qdev);
2297                ql_update_lrg_bufq_prod_index(qdev);
2298                writel(qdev->rsp_consumer_index,
2299                            &port_regs->CommonRegs.rspQConsumerIndex);
2300                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2301
2302                ql_enable_interrupts(qdev);
2303        }
2304        return tx_cleaned + rx_cleaned;
2305}
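
    /*
     * Standard NAPI completion contract, as used above: when fewer
     * completions than the budget were processed, the poll routine takes
     * itself off the poll list (__napi_complete) and re-enables chip
     * interrupts; returning the full budget keeps it scheduled for
     * another pass.
     */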
2306
2307static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2308{
2309
2310        struct net_device *ndev = dev_id;
2311        struct ql3_adapter *qdev = netdev_priv(ndev);
2312        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2313        u32 value;
2314        int handled = 1;
2315        u32 var;
2316
2317
2318
2319        value =
2320            ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2321
2322        if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2323                spin_lock(&qdev->adapter_lock);
2324                netif_stop_queue(qdev->ndev);
2325                netif_carrier_off(qdev->ndev);
2326                ql_disable_interrupts(qdev);
2327                qdev->port_link_state = LS_DOWN;
2328                set_bit(QL_RESET_ACTIVE, &qdev->flags);
2329
2330                if (value & ISP_CONTROL_FE) {
2331                        /*
2332                         * Chip Fatal Error.
2333                         */
2334                        var =
2335                            ql_read_page0_reg_l(qdev,
2336                                              &port_regs->PortFatalErrStatus);
2337                        printk(KERN_WARNING PFX
2338                               "%s: Resetting chip. PortFatalErrStatus "
2339                               "register = 0x%x\n", ndev->name, var);
2340                        set_bit(QL_RESET_START, &qdev->flags);
2341                } else {
2342                        /*
2343                         * Soft Reset Requested.
2344                         */
2345                        set_bit(QL_RESET_PER_SCSI, &qdev->flags);
2346                        printk(KERN_ERR PFX
2347                               "%s: Another function issued a reset to the "
2348                               "chip. ISR value = %x.\n", ndev->name, value);
2349                }
2350                queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2351                spin_unlock(&qdev->adapter_lock);
2352        } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2353                ql_disable_interrupts(qdev);
2354                if (likely(napi_schedule_prep(&qdev->napi))) {
2355                        __napi_schedule(&qdev->napi);
2356                }
2357        } else {
2358                return IRQ_NONE;
2359        }
2360
2361        return IRQ_RETVAL(handled);
2362}
2363
2364/*
2365 * Get the total number of segments needed for the
2366 * given number of fragments.  This is necessary because
2367 * outbound address lists (OAL) will be used when more than
2368 * two frags are given.  Each address list has 5 addr/len
2369 * pairs.  The 5th pair in each OAL is used to point to
2370 * the next OAL if more frags are coming.
2371 * That is why the frags:segment count ratio is not linear.
2372 */
2373static int ql_get_seg_count(struct ql3_adapter *qdev,
2374                            unsigned short frags)
2375{
2376        if (qdev->device_id == QL3022_DEVICE_ID)
2377                return 1;
2378
2379        switch(frags) {
2380        case 0: return 1;       /* just the skb->data seg */
2381        case 1: return 2;       /* skb->data + 1 frag */
2382        case 2: return 3;       /* skb->data + 2 frags */
2383        case 3: return 5;       /* skb->data + 1 frag + 1 OAL containing 2 frags */
2384        case 4: return 6;
2385        case 5: return 7;
2386        case 6: return 8;
2387        case 7: return 10;
2388        case 8: return 11;
2389        case 9: return 12;
2390        case 10: return 13;
2391        case 11: return 15;
2392        case 12: return 16;
2393        case 13: return 17;
2394        case 14: return 18;
2395        case 15: return 20;
2396        case 16: return 21;
2397        case 17: return 22;
2398        case 18: return 23;
2399        }
2400        return -1;
2401}
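
    /*
     * The table above follows from the OAL geometry described before
     * ql_get_seg_count(): one segment for skb->data, one per fragment,
     * plus one extra entry for every OAL chained on.  A hypothetical
     * closed form (the driver itself keeps the explicit table), matching
     * every row above, e.g. 7 frags -> 1 + 7 + 2 = 10:
     */
    static inline int ql_seg_count_closed_form(unsigned short frags)
    {
            if (frags <= 2)
                    return frags + 1;       /* fits in the IOCB addr/len pairs */
            return 1 + frags + DIV_ROUND_UP(frags - 2, 4);
    }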
2402
2403static void ql_hw_csum_setup(const struct sk_buff *skb,
2404                             struct ob_mac_iocb_req *mac_iocb_ptr)
2405{
2406        const struct iphdr *ip = ip_hdr(skb);
2407
2408        mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2409        mac_iocb_ptr->ip_hdr_len = ip->ihl;
2410
2411        if (ip->protocol == IPPROTO_TCP) {
2412                mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2413                        OB_3032MAC_IOCB_REQ_IC;
2414        } else {
2415                mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2416                        OB_3032MAC_IOCB_REQ_IC;
2417        }
2418
2419}
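
    /*
     * Note: only reached on the 3032, and only when the stack requests
     * checksum help (skb->ip_summed == CHECKSUM_PARTIAL); see the caller
     * in ql3xxx_send().
     */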
2420
2421/*
2422 * Map the buffers for this transmit.  This will return
2423 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2424 */
2425static int ql_send_map(struct ql3_adapter *qdev,
2426                                struct ob_mac_iocb_req *mac_iocb_ptr,
2427                                struct ql_tx_buf_cb *tx_cb,
2428                                struct sk_buff *skb)
2429{
2430        struct oal *oal;
2431        struct oal_entry *oal_entry;
2432        int len = skb_headlen(skb);
2433        dma_addr_t map;
2434        int err;
2435        int completed_segs, i;
2436        int seg_cnt, seg = 0;
2437        int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2438
2439        seg_cnt = tx_cb->seg_count;
2440        /*
2441         * Map the skb buffer first.
2442         */
2443        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2444
2445        err = pci_dma_mapping_error(qdev->pdev, map);
2446        if(err) {
2447                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2448                       qdev->ndev->name, err);
2449
2450                return NETDEV_TX_BUSY;
2451        }
2452
2453        oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2454        oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2455        oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2456        oal_entry->len = cpu_to_le32(len);
2457        pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2458        pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
2459        seg++;
2460
2461        if (seg_cnt == 1) {
2462                /* Terminate the last segment. */
2463                oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2464        } else {
2465                oal = tx_cb->oal;
2466                for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
2467                        skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2468                        oal_entry++;
2469                        if ((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
2470                            (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
2471                            (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
2472                            (seg == 17 && seg_cnt > 18)) {
2473                                /* Continuation entry points to outbound address list. */
2474                                map = pci_map_single(qdev->pdev, oal,
2475                                                     sizeof(struct oal),
2476                                                     PCI_DMA_TODEVICE);
2477
2478                                err = pci_dma_mapping_error(qdev->pdev, map);
2479                                if(err) {
2480
2481                                        printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
2482                                               qdev->ndev->name, err);
2483                                        goto map_error;
2484                                }
2485
2486                                oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2487                                oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2488                                oal_entry->len =
2489                                    cpu_to_le32(sizeof(struct oal) |
2490                                                OAL_CONT_ENTRY);
2491                                pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2492                                                   map);
2493                                pci_unmap_len_set(&tx_cb->map[seg], maplen,
2494                                                  sizeof(struct oal));
2495                                oal_entry = (struct oal_entry *)oal;
2496                                oal++;
2497                                seg++;
2498                        }
2499
2500                        map =
2501                            pci_map_page(qdev->pdev, frag->page,
2502                                         frag->page_offset, frag->size,
2503                                         PCI_DMA_TODEVICE);
2504
2505                        err = pci_dma_mapping_error(qdev->pdev, map);
2506                        if(err) {
2507                                printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
2508                                       qdev->ndev->name, err);
2509                                goto map_error;
2510                        }
2511
2512                        oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2513                        oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2514                        oal_entry->len = cpu_to_le32(frag->size);
2515                        pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2516                        pci_unmap_len_set(&tx_cb->map[seg], maplen,
2517                                          frag->size);
2518                }
2519                /* Terminate the last segment. */
2520                oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2521        }
2522
2523        return NETDEV_TX_OK;
2524
2525map_error:
2526        /* A PCI mapping failed, so we need to back out.
2527         * Traverse the OALs and associated pages that have already
2528         * been mapped, and unmap them to clean up properly.
2529         */
2530
2531        seg = 1;
2532        oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2533        oal = tx_cb->oal;
2534        for (i=0; i<completed_segs; i++,seg++) {
2535                oal_entry++;
2536
2537                if((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
2538                   (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
2539                   (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
2540                   (seg == 17 && seg_cnt > 18)) {
2541                        pci_unmap_single(qdev->pdev,
2542                                pci_unmap_addr(&tx_cb->map[seg], mapaddr),
2543                                pci_unmap_len(&tx_cb->map[seg], maplen),
2544                                 PCI_DMA_TODEVICE);
2545                        oal++;
2546                        seg++;
2547                }
2548
2549                pci_unmap_page(qdev->pdev,
2550                               pci_unmap_addr(&tx_cb->map[seg], mapaddr),
2551                               pci_unmap_len(&tx_cb->map[seg], maplen),
2552                               PCI_DMA_TODEVICE);
2553        }
2554
2555        pci_unmap_single(qdev->pdev,
2556                         pci_unmap_addr(&tx_cb->map[0], mapaddr),
2557                         pci_unmap_len(&tx_cb->map[0], maplen),
2558                         PCI_DMA_TODEVICE);
2559
2560        return NETDEV_TX_BUSY;
2561
2562}
2563
2564/*
2565 * The difference between 3022 and 3032 sends:
2566 * 3022 only supports a simple single segment transmission.
2567 * 3032 supports checksumming and scatter/gather lists (fragments).
2568 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2569 * in the IOCB plus a chain of outbound address lists (OAL) that
2570 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
2571 * will be used to point to an OAL when more ALP entries are required.
2572 * The IOCB is always the top of the chain followed by one or more
2573 * OALs (when necessary).
2574 */
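
    /*
     * Chain layout for a large 3032 send, as described above:
     *
     *   IOCB:   [ALP][ALP][ALP -> OAL 1]
     *   OAL 1:  [ALP][ALP][ALP][ALP][ALP -> OAL 2]
     *   OAL 2:  [ALP][ALP][ALP][ALP][ALP | OAL_LAST_ENTRY]
     *
     * ql_send_map() below builds this chain, marking continuation
     * entries with OAL_CONT_ENTRY and the final entry with
     * OAL_LAST_ENTRY.
     */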
2575static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2576                               struct net_device *ndev)
2577{
2578        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2579        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2580        struct ql_tx_buf_cb *tx_cb;
2581        u32 tot_len = skb->len;
2582        struct ob_mac_iocb_req *mac_iocb_ptr;
2583
2584        if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2585                return NETDEV_TX_BUSY;
2586        }
2587
2588        tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2589        if((tx_cb->seg_count = ql_get_seg_count(qdev,
2590                                                (skb_shinfo(skb)->nr_frags))) == -1) {
2591                printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
2592                return NETDEV_TX_OK;
2593        }
2594
2595        mac_iocb_ptr = tx_cb->queue_entry;
2596        memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2597        mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2598        mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2599        mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2600        mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2601        mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2602        tx_cb->skb = skb;
2603        if (qdev->device_id == QL3032_DEVICE_ID &&
2604            skb->ip_summed == CHECKSUM_PARTIAL)
2605                ql_hw_csum_setup(skb, mac_iocb_ptr);
2606
2607        if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
2608                printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
2609                return NETDEV_TX_BUSY;
2610        }
2611
2612        wmb();
2613        qdev->req_producer_index++;
2614        if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2615                qdev->req_producer_index = 0;
2616        wmb();
2617        ql_write_common_reg_l(qdev,
2618                            &port_regs->CommonRegs.reqQProducerIndex,
2619                            qdev->req_producer_index);
2620
2621        if (netif_msg_tx_queued(qdev))
2622                printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2623                       ndev->name, qdev->req_producer_index, skb->len);
2624
2625        atomic_dec(&qdev->tx_count);
2626        return NETDEV_TX_OK;
2627}
2628
2629static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2630{
2631        qdev->req_q_size =
2632            (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2633
2634        qdev->req_q_virt_addr =
2635            pci_alloc_consistent(qdev->pdev,
2636                                 (size_t) qdev->req_q_size,
2637                                 &qdev->req_q_phy_addr);
2638
2639        if ((qdev->req_q_virt_addr == NULL) ||
2640            LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2641                printk(KERN_ERR PFX "%s: reqQ failed.\n",
2642                       qdev->ndev->name);
2643                return -ENOMEM;
2644        }
2645
2646        qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2647
2648        qdev->rsp_q_virt_addr =
2649            pci_alloc_consistent(qdev->pdev,
2650                                 (size_t) qdev->rsp_q_size,
2651                                 &qdev->rsp_q_phy_addr);
2652
2653        if ((qdev->rsp_q_virt_addr == NULL) ||
2654            LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2655                printk(KERN_ERR PFX
2656                       "%s: rspQ allocation failed\n",
2657                       qdev->ndev->name);
2658                pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2659                                    qdev->req_q_virt_addr,
2660                                    qdev->req_q_phy_addr);
2661                return -ENOMEM;
2662        }
2663
2664        set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2665
2666        return 0;
2667}
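
    /*
     * The (addr & (size - 1)) tests above reject allocations that are
     * not naturally aligned to the queue size (the test assumes the size
     * is a power of two).  For example, a 4096-byte queue at physical
     * address 0x1000 passes (0x1000 & 0xfff == 0) while one at 0x1800
     * would not.
     */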
2668
2669static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2670{
2671        if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
2672                printk(KERN_INFO PFX
2673                       "%s: Already done.\n", qdev->ndev->name);
2674                return;
2675        }
2676
2677        pci_free_consistent(qdev->pdev,
2678                            qdev->req_q_size,
2679                            qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2680
2681        qdev->req_q_virt_addr = NULL;
2682
2683        pci_free_consistent(qdev->pdev,
2684                            qdev->rsp_q_size,
2685                            qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2686
2687        qdev->rsp_q_virt_addr = NULL;
2688
2689        clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2690}
2691
2692static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2693{
2694        /* Create Large Buffer Queue */
2695        qdev->lrg_buf_q_size =
2696            qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2697        if (qdev->lrg_buf_q_size < PAGE_SIZE)
2698                qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2699        else
2700                qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2701
2702        qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), GFP_KERNEL);
2703        if (qdev->lrg_buf == NULL) {
2704                printk(KERN_ERR PFX
2705                       "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2706                return -ENOMEM;
2707        }
2708
2709        qdev->lrg_buf_q_alloc_virt_addr =
2710            pci_alloc_consistent(qdev->pdev,
2711                                 qdev->lrg_buf_q_alloc_size,
2712                                 &qdev->lrg_buf_q_alloc_phy_addr);
2713
2714        if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2715                printk(KERN_ERR PFX
2716                       "%s: lBufQ failed\n", qdev->ndev->name);
2717                return -ENOMEM;
2718        }
2719        qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2720        qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2721
2722        /* Create Small Buffer Queue */
2723        qdev->small_buf_q_size =
2724            NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2725        if (qdev->small_buf_q_size < PAGE_SIZE)
2726                qdev->small_buf_q_alloc_size = PAGE_SIZE;
2727        else
2728                qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2729
2730        qdev->small_buf_q_alloc_virt_addr =
2731            pci_alloc_consistent(qdev->pdev,
2732                                 qdev->small_buf_q_alloc_size,
2733                                 &qdev->small_buf_q_alloc_phy_addr);
2734
2735        if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2736                printk(KERN_ERR PFX
2737                       "%s: Small Buffer Queue allocation failed.\n",
2738                       qdev->ndev->name);
2739                pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2740                                    qdev->lrg_buf_q_alloc_virt_addr,
2741                                    qdev->lrg_buf_q_alloc_phy_addr);
2742                return -ENOMEM;
2743        }
2744
2745        qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2746        qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2747        set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2748        return 0;
2749}
2750
2751static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2752{
2753        if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
2754                printk(KERN_INFO PFX
2755                       "%s: Already done.\n", qdev->ndev->name);
2756                return;
2757        }
2758        kfree(qdev->lrg_buf);
2759        pci_free_consistent(qdev->pdev,
2760                            qdev->lrg_buf_q_alloc_size,
2761                            qdev->lrg_buf_q_alloc_virt_addr,
2762                            qdev->lrg_buf_q_alloc_phy_addr);
2763
2764        qdev->lrg_buf_q_virt_addr = NULL;
2765
2766        pci_free_consistent(qdev->pdev,
2767                            qdev->small_buf_q_alloc_size,
2768                            qdev->small_buf_q_alloc_virt_addr,
2769                            qdev->small_buf_q_alloc_phy_addr);
2770
2771        qdev->small_buf_q_virt_addr = NULL;
2772
2773        clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2774}
2775
2776static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2777{
2778        int i;
2779        struct bufq_addr_element *small_buf_q_entry;
2780
2781        /* Currently we allocate one chunk of memory and use it for small buffers */
2782        qdev->small_buf_total_size =
2783            (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2784             QL_SMALL_BUFFER_SIZE);
2785
2786        qdev->small_buf_virt_addr =
2787            pci_alloc_consistent(qdev->pdev,
2788                                 qdev->small_buf_total_size,
2789                                 &qdev->small_buf_phy_addr);
2790
2791        if (qdev->small_buf_virt_addr == NULL) {
2792                printk(KERN_ERR PFX
2793                       "%s: Failed to get small buffer memory.\n",
2794                       qdev->ndev->name);
2795                return -ENOMEM;
2796        }
2797
2798        qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2799        qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2800
2801        small_buf_q_entry = qdev->small_buf_q_virt_addr;
2802
2803        /* Initialize the small buffer queue. */
2804        for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2805                small_buf_q_entry->addr_high =
2806                    cpu_to_le32(qdev->small_buf_phy_addr_high);
2807                small_buf_q_entry->addr_low =
2808                    cpu_to_le32(qdev->small_buf_phy_addr_low +
2809                                (i * QL_SMALL_BUFFER_SIZE));
2810                small_buf_q_entry++;
2811        }
2812        qdev->small_buf_index = 0;
2813        set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
2814        return 0;
2815}
2816
2817static void ql_free_small_buffers(struct ql3_adapter *qdev)
2818{
2819        if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
2820                printk(KERN_INFO PFX
2821                       "%s: Already done.\n", qdev->ndev->name);
2822                return;
2823        }
2824        if (qdev->small_buf_virt_addr != NULL) {
2825                pci_free_consistent(qdev->pdev,
2826                                    qdev->small_buf_total_size,
2827                                    qdev->small_buf_virt_addr,
2828                                    qdev->small_buf_phy_addr);
2829
2830                qdev->small_buf_virt_addr = NULL;
2831        }
2832}
2833
2834static void ql_free_large_buffers(struct ql3_adapter *qdev)
2835{
2836        int i = 0;
2837        struct ql_rcv_buf_cb *lrg_buf_cb;
2838
2839        for (i = 0; i < qdev->num_large_buffers; i++) {
2840                lrg_buf_cb = &qdev->lrg_buf[i];
2841                if (lrg_buf_cb->skb) {
2842                        pci_unmap_single(qdev->pdev,
2843                                         pci_unmap_addr(lrg_buf_cb, mapaddr),
2844                                         pci_unmap_len(lrg_buf_cb, maplen),
2845                                         PCI_DMA_FROMDEVICE);
2846                        dev_kfree_skb(lrg_buf_cb->skb);
2847                        memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2848                } else {
2849                        break;
2850                }
2851        }
2852}
2853
2854static void ql_init_large_buffers(struct ql3_adapter *qdev)
2855{
2856        int i;
2857        struct ql_rcv_buf_cb *lrg_buf_cb;
2858        struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2859
2860        for (i = 0; i < qdev->num_large_buffers; i++) {
2861                lrg_buf_cb = &qdev->lrg_buf[i];
2862                buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2863                buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2864                buf_addr_ele++;
2865        }
2866        qdev->lrg_buf_index = 0;
2867        qdev->lrg_buf_skb_check = 0;
2868}
2869
2870static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2871{
2872        int i;
2873        struct ql_rcv_buf_cb *lrg_buf_cb;
2874        struct sk_buff *skb;
2875        dma_addr_t map;
2876        int err;
2877
2878        for (i = 0; i < qdev->num_large_buffers; i++) {
2879                skb = netdev_alloc_skb(qdev->ndev,
2880                                       qdev->lrg_buffer_len);
2881                if (unlikely(!skb)) {
2882                        /* Better luck next round */
2883                        printk(KERN_ERR PFX
2884                               "%s: large buffer allocation failed "
2885                               "for %d bytes at index %d.\n",
2886                               qdev->ndev->name,
2887                               qdev->lrg_buffer_len, i);
2888                        ql_free_large_buffers(qdev);
2889                        return -ENOMEM;
2890                } else {
2891
2892                        lrg_buf_cb = &qdev->lrg_buf[i];
2893                        memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2894                        lrg_buf_cb->index = i;
2895                        lrg_buf_cb->skb = skb;
2896                        /*
2897                         * We save some space to copy the ethhdr from first
2898                         * buffer
2899                         */
2900                        skb_reserve(skb, QL_HEADER_SPACE);
2901                        map = pci_map_single(qdev->pdev,
2902                                             skb->data,
2903                                             qdev->lrg_buffer_len -
2904                                             QL_HEADER_SPACE,
2905                                             PCI_DMA_FROMDEVICE);
2906
2907                        err = pci_dma_mapping_error(qdev->pdev, map);
2908                        if (err) {
2909                                printk(KERN_ERR PFX "%s: PCI mapping failed with error: %d\n",
2910                                       qdev->ndev->name, err);
2911                                ql_free_large_buffers(qdev);
2912                                return -ENOMEM;
2913                        }
2914
2915                        pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2916                        pci_unmap_len_set(lrg_buf_cb, maplen,
2917                                          qdev->lrg_buffer_len -
2918                                          QL_HEADER_SPACE);
2919                        lrg_buf_cb->buf_phy_addr_low =
2920                            cpu_to_le32(LS_64BITS(map));
2921                        lrg_buf_cb->buf_phy_addr_high =
2922                            cpu_to_le32(MS_64BITS(map));
2923                }
2924        }
2925        return 0;
2926}
2927
2928static void ql_free_send_free_list(struct ql3_adapter *qdev)
2929{
2930        struct ql_tx_buf_cb *tx_cb;
2931        int i;
2932
2933        tx_cb = &qdev->tx_buf[0];
2934        for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2935                if (tx_cb->oal) {
2936                        kfree(tx_cb->oal);
2937                        tx_cb->oal = NULL;
2938                }
2939                tx_cb++;
2940        }
2941}
2942
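    /*
     * Build the free list of transmit control blocks: pair each tx_cb
     * with its slot in the request queue and preallocate its oal buffer
     * so the transmit hot path never has to allocate memory.
     */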
2943static int ql_create_send_free_list(struct ql3_adapter *qdev)
2944{
2945        struct ql_tx_buf_cb *tx_cb;
2946        int i;
2947        struct ob_mac_iocb_req *req_q_curr =
2948                                        qdev->req_q_virt_addr;
2949
2950        /* Create free list of transmit buffers */
2951        for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2952
2953                tx_cb = &qdev->tx_buf[i];
2954                tx_cb->skb = NULL;
2955                tx_cb->queue_entry = req_q_curr;
2956                req_q_curr++;
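                    /*
                     * 512 bytes presumably covers the driver's oal
                     * (overflow address list), used when a frame has more
                     * fragments than the request IOCB can describe inline.
                     */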
2957                tx_cb->oal = kmalloc(512, GFP_KERNEL);
2958                if (tx_cb->oal == NULL)
2959                        return -ENOMEM;
2960        }
2961        return 0;
2962}
2963
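    /*
     * Top-level memory setup: size the receive buffers from the MTU,
     * then allocate the shadow register page, the request/response
     * rings, the buffer queues, the small and large receive buffers,
     * and the transmit free list.  The error unwind below releases
     * everything in reverse order.
     */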
2964static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2965{
2966        if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2967                qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2968                qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2969        }
2970        else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2971                /*
2972                 * Bigger buffers, so less of them.
2973                 */
2974                qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2975                qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2976        } else {
2977                printk(KERN_ERR PFX
2978                       "%s: Invalid MTU size. Only 1500 and 9000 are accepted.\n",
2979                       qdev->ndev->name);
2980                return -EINVAL;
2981        }
2982        qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2983        qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2984        qdev->max_frame_size =
2985            (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2986
2987        /*
2988         * First allocate a page of shared memory and use it for shadow
2989         * locations of Network Request Queue Consumer Address Register and
2990         * Network Completion Queue Producer Index Register
2991         */
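            /*
             * Layout of the shadow page (written by the chip, read by
             * the driver):
             *   offset 0: request queue consumer index
             *   offset 8: response queue producer index
             */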
2992        qdev->shadow_reg_virt_addr =
2993            pci_alloc_consistent(qdev->pdev,
2994                                 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2995
2996        if (qdev->shadow_reg_virt_addr != NULL) {
2997                qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2998                qdev->req_consumer_index_phy_addr_high =
2999                    MS_64BITS(qdev->shadow_reg_phy_addr);
3000                qdev->req_consumer_index_phy_addr_low =
3001                    LS_64BITS(qdev->shadow_reg_phy_addr);
3002
3003                qdev->prsp_producer_index =
3004                    (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
3005                qdev->rsp_producer_index_phy_addr_high =
3006                    qdev->req_consumer_index_phy_addr_high;
3007                qdev->rsp_producer_index_phy_addr_low =
3008                    qdev->req_consumer_index_phy_addr_low + 8;
3009        } else {
3010                printk(KERN_ERR PFX
3011                       "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
3012                return -ENOMEM;
3013        }
3014
3015        if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
3016                printk(KERN_ERR PFX
3017                       "%s: ql_alloc_net_req_rsp_queues failed.\n",
3018                       qdev->ndev->name);
3019                goto err_req_rsp;
3020        }
3021
3022        if (ql_alloc_buffer_queues(qdev) != 0) {
3023                printk(KERN_ERR PFX
3024                       "%s: ql_alloc_buffer_queues failed.\n",
3025                       qdev->ndev->name);
3026                goto err_buffer_queues;
3027        }
3028
3029        if (ql_alloc_small_buffers(qdev) != 0) {
3030                printk(KERN_ERR PFX
3031                       "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
3032                goto err_small_buffers;
3033        }
3034
3035        if (ql_alloc_large_buffers(qdev) != 0) {
3036                printk(KERN_ERR PFX
3037                       "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
3038                goto err_small_buffers;
3039        }
3040
3041        /* Initialize the large buffer queue. */
3042        ql_init_large_buffers(qdev);
3043        if (ql_create_send_free_list(qdev))
3044                goto err_free_list;
3045
3046        qdev->rsp_current = qdev->rsp_q_virt_addr;
3047
3048        return 0;
3049err_free_list:
3050        ql_free_send_free_list(qdev);
3051err_small_buffers:
3052        ql_free_buffer_queues(qdev);
3053err_buffer_queues:
3054        ql_free_net_req_rsp_queues(qdev);
3055err_req_rsp:
3056        pci_free_consistent(qdev->pdev,
3057                            PAGE_SIZE,
3058                            qdev->shadow_reg_virt_addr,
3059                            qdev->shadow_reg_phy_addr);
3060
3061        return -ENOMEM;
3062}
3063
3064static void ql_free_mem_resources(struct ql3_adapter *qdev)
3065{
3066        ql_free_send_free_list(qdev);
3067        ql_free_large_buffers(qdev);
3068        ql_free_small_buffers(qdev);
3069        ql_free_buffer_queues(qdev);
3070        ql_free_net_req_rsp_queues(qdev);
3071        if (qdev->shadow_reg_virt_addr != NULL) {
3072                pci_free_consistent(qdev->pdev,
3073                                    PAGE_SIZE,
3074                                    qdev->shadow_reg_virt_addr,
3075                                    qdev->shadow_reg_phy_addr);
3076                qdev->shadow_reg_virt_addr = NULL;
3077        }
3078}
3079
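    /*
     * One-time programming of the chip's local RAM layout (buflet pool
     * and the IP/TCP hash, NCB, and DRB tables) from NVRAM values, done
     * while holding the DDR RAM semaphore.
     */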
3080static int ql_init_misc_registers(struct ql3_adapter *qdev)
3081{
3082        struct ql3xxx_local_ram_registers __iomem *local_ram =
3083            (void __iomem *)qdev->mem_map_registers;
3084
3085        if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
3086                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3087                         2) << 4))
3088                return -1;
3089
3090        ql_write_page2_reg(qdev,
3091                           &local_ram->bufletSize, qdev->nvram_data.bufletSize);
3092
3093        ql_write_page2_reg(qdev,
3094                           &local_ram->maxBufletCount,
3095                           qdev->nvram_data.bufletCount);
3096
3097        ql_write_page2_reg(qdev,
3098                           &local_ram->freeBufletThresholdLow,
3099                           (qdev->nvram_data.tcpWindowThreshold25 << 16) |
3100                           (qdev->nvram_data.tcpWindowThreshold0));
3101
3102        ql_write_page2_reg(qdev,
3103                           &local_ram->freeBufletThresholdHigh,
3104                           qdev->nvram_data.tcpWindowThreshold50);
3105
3106        ql_write_page2_reg(qdev,
3107                           &local_ram->ipHashTableBase,
3108                           (qdev->nvram_data.ipHashTableBaseHi << 16) |
3109                           qdev->nvram_data.ipHashTableBaseLo);
3110        ql_write_page2_reg(qdev,
3111                           &local_ram->ipHashTableCount,
3112                           qdev->nvram_data.ipHashTableSize);
3113        ql_write_page2_reg(qdev,
3114                           &local_ram->tcpHashTableBase,
3115                           (qdev->nvram_data.tcpHashTableBaseHi << 16) |
3116                           qdev->nvram_data.tcpHashTableBaseLo);
3117        ql_write_page2_reg(qdev,
3118                           &local_ram->tcpHashTableCount,
3119                           qdev->nvram_data.tcpHashTableSize);
3120        ql_write_page2_reg(qdev,
3121                           &local_ram->ncbBase,
3122                           (qdev->nvram_data.ncbTableBaseHi << 16) |
3123                           qdev->nvram_data.ncbTableBaseLo);
3124        ql_write_page2_reg(qdev,
3125                           &local_ram->maxNcbCount,
3126                           qdev->nvram_data.ncbTableSize);
3127        ql_write_page2_reg(qdev,
3128                           &local_ram->drbBase,
3129                           (qdev->nvram_data.drbTableBaseHi << 16) |
3130                           qdev->nvram_data.drbTableBaseLo);
3131        ql_write_page2_reg(qdev,
3132                           &local_ram->maxDrbCount,
3133                           qdev->nvram_data.drbTableSize);
3134        ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3135        return 0;
3136}
3137
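    /*
     * Bring the adapter to a working state: take the PHY out of reset,
     * program the ring and buffer queue registers, load the MAC address,
     * and wait for the chip to report configuration complete.  The
     * one-time RAM setup is skipped if the chip was already configured.
     */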
3138static int ql_adapter_initialize(struct ql3_adapter *qdev)
3139{
3140        u32 value;
3141        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3142        struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3143                                                (void __iomem *)port_regs;
3144        u32 delay = 10;
3145        int status = 0;
3146        unsigned long hw_flags = 0;
3147
3148        if (ql_mii_setup(qdev))
3149                return -1;
3150
3151        /* Bring the PHY out of reset */
3152        ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
3153                            (ISP_SERIAL_PORT_IF_WE |
3154                             (ISP_SERIAL_PORT_IF_WE << 16)));
3155        /* Give the PHY time to come out of reset. */
3156        mdelay(100);
3157        qdev->port_link_state = LS_DOWN;
3158        netif_carrier_off(qdev->ndev);
3159
3160        /* V2 chip fix for ARS-39168. */
3161        ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
3162                            (ISP_SERIAL_PORT_IF_SDE |
3163                             (ISP_SERIAL_PORT_IF_SDE << 16)));
3164
3165        /* Request Queue Registers */
3166        *((u32 *) (qdev->preq_consumer_index)) = 0;
3167        atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3168        qdev->req_producer_index = 0;
3169
3170        ql_write_page1_reg(qdev,
3171                           &hmem_regs->reqConsumerIndexAddrHigh,
3172                           qdev->req_consumer_index_phy_addr_high);
3173        ql_write_page1_reg(qdev,
3174                           &hmem_regs->reqConsumerIndexAddrLow,
3175                           qdev->req_consumer_index_phy_addr_low);
3176
3177        ql_write_page1_reg(qdev,
3178                           &hmem_regs->reqBaseAddrHigh,
3179                           MS_64BITS(qdev->req_q_phy_addr));
3180        ql_write_page1_reg(qdev,
3181                           &hmem_regs->reqBaseAddrLow,
3182                           LS_64BITS(qdev->req_q_phy_addr));
3183        ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3184
3185        /* Response Queue Registers */
3186        *((__le16 *) (qdev->prsp_producer_index)) = 0;
3187        qdev->rsp_consumer_index = 0;
3188        qdev->rsp_current = qdev->rsp_q_virt_addr;
3189
3190        ql_write_page1_reg(qdev,
3191                           &hmem_regs->rspProducerIndexAddrHigh,
3192                           qdev->rsp_producer_index_phy_addr_high);
3193
3194        ql_write_page1_reg(qdev,
3195                           &hmem_regs->rspProducerIndexAddrLow,
3196                           qdev->rsp_producer_index_phy_addr_low);
3197
3198        ql_write_page1_reg(qdev,
3199                           &hmem_regs->rspBaseAddrHigh,
3200                           MS_64BITS(qdev->rsp_q_phy_addr));
3201
3202        ql_write_page1_reg(qdev,
3203                           &hmem_regs->rspBaseAddrLow,
3204                           LS_64BITS(qdev->rsp_q_phy_addr));
3205
3206        ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3207
3208        /* Large Buffer Queue */
3209        ql_write_page1_reg(qdev,
3210                           &hmem_regs->rxLargeQBaseAddrHigh,
3211                           MS_64BITS(qdev->lrg_buf_q_phy_addr));
3212
3213        ql_write_page1_reg(qdev,
3214                           &hmem_regs->rxLargeQBaseAddrLow,
3215                           LS_64BITS(qdev->lrg_buf_q_phy_addr));
3216
3217        ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
3218
3219        ql_write_page1_reg(qdev,
3220                           &hmem_regs->rxLargeBufferLength,
3221                           qdev->lrg_buffer_len);
3222
3223        /* Small Buffer Queue */
3224        ql_write_page1_reg(qdev,
3225                           &hmem_regs->rxSmallQBaseAddrHigh,
3226                           MS_64BITS(qdev->small_buf_q_phy_addr));
3227
3228        ql_write_page1_reg(qdev,
3229                           &hmem_regs->rxSmallQBaseAddrLow,
3230                           LS_64BITS(qdev->small_buf_q_phy_addr));
3231
3232        ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3233        ql_write_page1_reg(qdev,
3234                           &hmem_regs->rxSmallBufferLength,
3235                           QL_SMALL_BUFFER_SIZE);
3236
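            /*
             * The receive buffer queues were fully populated at
             * allocation time, so the producer indices start at the
             * last entry, handing every buffer to the chip.
             */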
3237        qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3238        qdev->small_buf_release_cnt = 8;
3239        qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3240        qdev->lrg_buf_release_cnt = 8;
3241        qdev->lrg_buf_next_free =
3242            (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
3243        qdev->small_buf_index = 0;
3244        qdev->lrg_buf_index = 0;
3245        qdev->lrg_buf_free_count = 0;
3246        qdev->lrg_buf_free_head = NULL;
3247        qdev->lrg_buf_free_tail = NULL;
3248
3249        ql_write_common_reg(qdev,
3250                            &port_regs->CommonRegs.
3251                            rxSmallQProducerIndex,
3252                            qdev->small_buf_q_producer_index);
3253        ql_write_common_reg(qdev,
3254                            &port_regs->CommonRegs.
3255                            rxLargeQProducerIndex,
3256                            qdev->lrg_buf_q_producer_index);
3257
3258        /*
3259         * Find out if the chip has already been initialized.  If it has, then
3260         * we skip some of the initialization.
3261         */
3262        clear_bit(QL_LINK_MASTER, &qdev->flags);
3263        value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3264        if ((value & PORT_STATUS_IC) == 0) {
3265
3266                /* Chip has not been configured yet, so let it rip. */
3267                if (ql_init_misc_registers(qdev)) {
3268                        status = -1;
3269                        goto out;
3270                }
3271
3272                value = qdev->nvram_data.tcpMaxWindowSize;
3273                ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3274
3275                value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3276
3277                if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3278                                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3279                                 * 2) << 13)) {
3280                        status = -1;
3281                        goto out;
3282                }
3283                ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3284                ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3285                                   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3286                                     16) | (INTERNAL_CHIP_SD |
3287                                            INTERNAL_CHIP_WE)));
3288                ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3289        }
3290
3291        if (qdev->mac_index)
3292                ql_write_page0_reg(qdev,
3293                                   &port_regs->mac1MaxFrameLengthReg,
3294                                   qdev->max_frame_size);
3295        else
3296                ql_write_page0_reg(qdev,
3297                                   &port_regs->mac0MaxFrameLengthReg,
3298                                   qdev->max_frame_size);
3299
3300        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3301                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3302                         2) << 7)) {
3303                status = -1;
3304                goto out;
3305        }
3306
3307        PHY_Setup(qdev);
3308        ql_init_scan_mode(qdev);
3309        ql_get_phy_owner(qdev);
3310
3311        /* Load the MAC Configuration */
3312
3313        /* Program lower 32 bits of the MAC address */
3314        ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3315                           (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3316        ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3317                           ((qdev->ndev->dev_addr[2] << 24)
3318                            | (qdev->ndev->dev_addr[3] << 16)
3319                            | (qdev->ndev->dev_addr[4] << 8)
3320                            | qdev->ndev->dev_addr[5]));
3321
3322        /* Program top 16 bits of the MAC address */
3323        ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3324                           ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3325        ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3326                           ((qdev->ndev->dev_addr[0] << 8)
3327                            | qdev->ndev->dev_addr[1]));
3328
3329        /* Enable Primary MAC */
3330        ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3331                           ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3332                            MAC_ADDR_INDIRECT_PTR_REG_PE));
3333
3334        /* Clear Primary and Secondary IP addresses */
3335        ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3336                           ((IP_ADDR_INDEX_REG_MASK << 16) |
3337                            (qdev->mac_index << 2)));
3338        ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3339
3340        ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3341                           ((IP_ADDR_INDEX_REG_MASK << 16) |
3342                            ((qdev->mac_index << 2) + 1)));
3343        ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3344
3345        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3346
3347        /* Indicate Configuration Complete */
3348        ql_write_page0_reg(qdev,
3349                           &port_regs->portControl,
3350                           ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3351
3352        do {
3353                value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3354                if (value & PORT_STATUS_IC)
3355                        break;
3356                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3357                msleep(500);
3358                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3359        } while (--delay);
3360
3361        if (delay == 0) {
3362                printk(KERN_ERR PFX
3363                       "%s: HW initialization timeout.\n", qdev->ndev->name);
3364                status = -1;
3365                goto out;
3366        }
3367
3368        /* Enable Ethernet Function */
3369        if (qdev->device_id == QL3032_DEVICE_ID) {
3370                value =
3371                    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3372                     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3373                        QL3032_PORT_CONTROL_ET);
3374                ql_write_page0_reg(qdev, &port_regs->functionControl,
3375                                   ((value << 16) | value));
3376        } else {
3377                value =
3378                    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3379                     PORT_CONTROL_HH);
3380                ql_write_page0_reg(qdev, &port_regs->portControl,
3381                                   ((value << 16) | value));
3382        }
3383
3384
3385out:
3386        return status;
3387}
3388
3389/*
3390 * Caller holds hw_lock.
3391 */
3392static int ql_adapter_reset(struct ql3_adapter *qdev)
3393{
3394        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3395        int status = 0;
3396        u16 value;
3397        int max_wait_time;
3398
3399        set_bit(QL_RESET_ACTIVE, &qdev->flags);
3400        clear_bit(QL_RESET_DONE, &qdev->flags);
3401
3402        /*
3403         * Issue soft reset to chip.
3404         */
3405        printk(KERN_DEBUG PFX
3406               "%s: Issue soft reset to chip.\n",
3407               qdev->ndev->name);
3408        ql_write_common_reg(qdev,
3409                            &port_regs->CommonRegs.ispControlStatus,
3410                            ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3411
3412        /* Wait up to 5 seconds for the reset to complete. */
3413        printk(KERN_DEBUG PFX
3414               "%s: Waiting up to 5 seconds for reset to complete.\n",
3415               qdev->ndev->name);
3416
3417        /* Wait until the firmware tells us the Soft Reset is done */
3418        max_wait_time = 5;
3419        do {
3420                value =
3421                    ql_read_common_reg(qdev,
3422                                       &port_regs->CommonRegs.ispControlStatus);
3423                if ((value & ISP_CONTROL_SR) == 0)
3424                        break;
3425
3426                ssleep(1);
3427        } while ((--max_wait_time));
3428
3429        /*
3430         * Also, make sure that the Network Reset Interrupt bit has been
3431         * cleared after the soft reset has taken place.
3432         */
3433        value =
3434            ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3435        if (value & ISP_CONTROL_RI) {
3436                printk(KERN_DEBUG PFX
3437                       "ql_adapter_reset: clearing RI after reset.\n");
3438                ql_write_common_reg(qdev,
3439                                    &port_regs->CommonRegs.
3440                                    ispControlStatus,
3441                                    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3442        }
3443
3444        if (max_wait_time == 0) {
3445                /* Issue Force Soft Reset */
3446                ql_write_common_reg(qdev,
3447                                    &port_regs->CommonRegs.
3448                                    ispControlStatus,
3449                                    ((ISP_CONTROL_FSR << 16) |
3450                                     ISP_CONTROL_FSR));
3451                /*
3452                 * Wait until the firmware tells us the Force Soft Reset is
3453                 * done
3454                 */
3455                max_wait_time = 5;
3456                do {
3457                        value =
3458                            ql_read_common_reg(qdev,
3459                                               &port_regs->CommonRegs.
3460                                               ispControlStatus);
3461                        if ((value & ISP_CONTROL_FSR) == 0) {
3462                                break;
3463                        }
3464                        ssleep(1);
3465                } while ((--max_wait_time));
3466        }
3467        if (max_wait_time == 0)
3468                status = 1;
3469
3470        clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3471        set_bit(QL_RESET_DONE, &qdev->flags);
3472        return status;
3473}
3474
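    /*
     * Decode ispControlStatus to learn which network function this PCI
     * function is, then record the matching outbound opcode, mailbox
     * bit mask, and PHY address, and note whether the link is optical.
     */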
3475static void ql_set_mac_info(struct ql3_adapter *qdev)
3476{
3477        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3478        u32 value, port_status;
3479        u8 func_number;
3480
3481        /* Get the function number */
3482        value =
3483            ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3484        func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3485        port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3486        switch (value & ISP_CONTROL_FN_MASK) {
3487        case ISP_CONTROL_FN0_NET:
3488                qdev->mac_index = 0;
3489                qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3490                qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3491                qdev->PHYAddr = PORT0_PHY_ADDRESS;
3492                if (port_status & PORT_STATUS_SM0)
3493                        set_bit(QL_LINK_OPTICAL, &qdev->flags);
3494                else
3495                        clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3496                break;
3497
3498        case ISP_CONTROL_FN1_NET:
3499                qdev->mac_index = 1;
3500                qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3501                qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3502                qdev->PHYAddr = PORT1_PHY_ADDRESS;
3503                if (port_status & PORT_STATUS_SM1)
3504                        set_bit(QL_LINK_OPTICAL, &qdev->flags);
3505                else
3506                        clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3507                break;
3508
3509        case ISP_CONTROL_FN0_SCSI:
3510        case ISP_CONTROL_FN1_SCSI:
3511        default:
3512                printk(KERN_DEBUG PFX
3513                       "%s: Invalid function number, ispControlStatus = 0x%x\n",
3514                       qdev->ndev->name, value);
3515                break;
3516        }
3517        qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3518}
3519
3520static void ql_display_dev_info(struct net_device *ndev)
3521{
3522        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3523        struct pci_dev *pdev = qdev->pdev;
3524
3525        printk(KERN_INFO PFX
3526               "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
3527               DRV_NAME, qdev->index, qdev->chip_rev_id,
3528               (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
3529               qdev->pci_slot);
3530        printk(KERN_INFO PFX
3531               "%s Interface.\n",
3532               test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3533
3534        /*
3535         * Print PCI bus width/type.
3536         */
3537        printk(KERN_INFO PFX
3538               "Bus interface is %s %s.\n",
3539               ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3540               ((qdev->pci_x) ? "PCI-X" : "PCI"));
3541
3542        printk(KERN_INFO PFX
3543               "mem IO base address adjusted = 0x%p\n",
3544               qdev->mem_map_registers);
3545        printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
3546
3547        if (netif_msg_probe(qdev))
3548                printk(KERN_INFO PFX
3549                       "%s: MAC address %pM\n",
3550                       ndev->name, ndev->dev_addr);
3551}
3552
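    /*
     * Tear down a running adapter: quiesce the queues, release the IRQ
     * (and MSI), stop the periodic timer and NAPI, optionally soft-reset
     * the chip under the driver lock, and free all memory resources.
     */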
3553static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3554{
3555        struct net_device *ndev = qdev->ndev;
3556        int retval = 0;
3557
3558        netif_stop_queue(ndev);
3559        netif_carrier_off(ndev);
3560
3561        clear_bit(QL_ADAPTER_UP, &qdev->flags);
3562        clear_bit(QL_LINK_MASTER, &qdev->flags);
3563
3564        ql_disable_interrupts(qdev);
3565
3566        free_irq(qdev->pdev->irq, ndev);
3567
3568        if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3569                printk(KERN_INFO PFX
3570                       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
3571                clear_bit(QL_MSI_ENABLED, &qdev->flags);
3572                pci_disable_msi(qdev->pdev);
3573        }
3574
3575        del_timer_sync(&qdev->adapter_timer);
3576
3577        napi_disable(&qdev->napi);
3578
3579        if (do_reset) {
3580                int soft_reset;
3581                unsigned long hw_flags;
3582
3583                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3584                if (ql_wait_for_drvr_lock(qdev)) {
3585                        if ((soft_reset = ql_adapter_reset(qdev))) {
3586                                printk(KERN_ERR PFX
3587                                       "%s: ql_adapter_reset(%d) FAILED!\n",
3588                                       ndev->name, qdev->index);
3589                        }
3590                        printk(KERN_ERR PFX
3591                               "%s: Releasing driver lock via chip reset.\n", ndev->name);
3592                } else {
3593                        printk(KERN_ERR PFX
3594                               "%s: Could not acquire driver lock to do "
3595                               "reset!\n", ndev->name);
3596                        retval = -1;
3597                }
3598                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3599        }
3600        ql_free_mem_resources(qdev);
3601        return retval;
3602}
3603
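    /*
     * Mirror image of ql_adapter_down(): allocate memory resources,
     * enable MSI if requested, hook up the interrupt, initialize the
     * hardware under the driver lock, then start the periodic timer,
     * NAPI, and interrupts.
     */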
3604static int ql_adapter_up(struct ql3_adapter *qdev)
3605{
3606        struct net_device *ndev = qdev->ndev;
3607        int err;
3608        unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
3609        unsigned long hw_flags;
3610
3611        if (ql_alloc_mem_resources(qdev)) {
3612                printk(KERN_ERR PFX
3613                       "%s: Unable to allocate buffers.\n", ndev->name);
3614                return -ENOMEM;
3615        }
3616
3617        if (qdev->msi) {
3618                if (pci_enable_msi(qdev->pdev)) {
3619                        printk(KERN_ERR PFX
3620                               "%s: User requested MSI, but MSI failed to "
3621                               "initialize.  Continuing without MSI.\n",
3622                               qdev->ndev->name);
3623                        qdev->msi = 0;
3624                } else {
3625                        printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
3626                        set_bit(QL_MSI_ENABLED, &qdev->flags);
3627                        irq_flags &= ~IRQF_SHARED;
3628                }
3629        }
3630
3631        if ((err = request_irq(qdev->pdev->irq,
3632                               ql3xxx_isr,
3633                               irq_flags, ndev->name, ndev))) {
3634                printk(KERN_ERR PFX
3635                       "%s: Failed to reserve interrupt %d; already in use.\n",
3636                       ndev->name, qdev->pdev->irq);
3637                goto err_irq;
3638        }
3639
3640        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3641
3642        if (ql_wait_for_drvr_lock(qdev)) {
3643                if ((err = ql_adapter_initialize(qdev))) {
3644                        printk(KERN_ERR PFX
3645                               "%s: Unable to initialize adapter.\n",
3646                               ndev->name);
3647                        goto err_init;
3648                }
3649                printk(KERN_ERR PFX
3650                       "%s: Releasing driver lock.\n", ndev->name);
3651                ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3652        } else {
3653                printk(KERN_ERR PFX
3654                       "%s: Could not acquire driver lock.\n",
3655                       ndev->name);
3656                err = -1;
                goto err_lock;
3657        }
3658
3659        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3660
3661        set_bit(QL_ADAPTER_UP, &qdev->flags);
3662
3663        mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3664
3665        napi_enable(&qdev->napi);
3666        ql_enable_interrupts(qdev);
3667        return 0;
3668
3669err_init:
3670        ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3671err_lock:
3672        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3673        free_irq(qdev->pdev->irq, ndev);
3674err_irq:
3675        if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3676                printk(KERN_INFO PFX
3677                       "%s: calling pci_disable_msi().\n",
3678                       qdev->ndev->name);
3679                clear_bit(QL_MSI_ENABLED, &qdev->flags);
3680                pci_disable_msi(qdev->pdev);
3681        }
3682        return err;
3683}
3684
3685static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3686{
3687        if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3688                printk(KERN_ERR PFX
3689                       "%s: Driver up/down cycle failed, "
3690                       "closing device.\n", qdev->ndev->name);
3691                rtnl_lock();
3692                dev_close(qdev->ndev);
3693                rtnl_unlock();
3694                return -1;
3695        }
3696        return 0;
3697}
3698
3699static int ql3xxx_close(struct net_device *ndev)
3700{
3701        struct ql3_adapter *qdev = netdev_priv(ndev);
3702
3703        /*
3704         * Wait for device to recover from a reset.
3705         * (Rarely happens, but possible.)
3706         */
3707        while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3708                msleep(50);
3709
3710        ql_adapter_down(qdev, QL_DO_RESET);
3711        return 0;
3712}
3713
3714static int ql3xxx_open(struct net_device *ndev)
3715{
3716        struct ql3_adapter *qdev = netdev_priv(ndev);
3717        return ql_adapter_up(qdev);
3718}
3719
3720static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3721{
3722        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3723        struct ql3xxx_port_registers __iomem *port_regs =
3724                        qdev->mem_map_registers;
3725        struct sockaddr *addr = p;
3726        unsigned long hw_flags;
3727
3728        if (netif_running(ndev))
3729                return -EBUSY;
3730
3731        if (!is_valid_ether_addr(addr->sa_data))
3732                return -EADDRNOTAVAIL;
3733
3734        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3735
3736        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3737        /* Program lower 32 bits of the MAC address */
3738        ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3739                           (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3740        ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3741                           ((ndev->dev_addr[2] << 24) | (ndev->
3742                                                         dev_addr[3] << 16) |
3743                            (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3744
3745        /* Program top 16 bits of the MAC address */
3746        ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3747                           ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3748        ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3749                           ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3750        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3751
3752        return 0;
3753}
3754
3755static void ql3xxx_tx_timeout(struct net_device *ndev)
3756{
3757        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3758
3759        printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
3760        /*
3761         * Stop the queues, we've got a problem.
3762         */
3763        netif_stop_queue(ndev);
3764
3765        /*
3766         * Wake up the worker to process this event.
3767         */
3768        queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3769}
3770
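    /*
     * Deferred reset handler: free any transmit skbs that were in
     * flight when the chip reset, clear the reset interrupt, wait for
     * the soft reset to finish, and cycle the interface back up.
     */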
3771static void ql_reset_work(struct work_struct *work)
3772{
3773        struct ql3_adapter *qdev =
3774                container_of(work, struct ql3_adapter, reset_work.work);
3775        struct net_device *ndev = qdev->ndev;
3776        u32 value;
3777        struct ql_tx_buf_cb *tx_cb;
3778        int max_wait_time, i;
3779        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3780        unsigned long hw_flags;
3781
3782        if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
                test_bit(QL_RESET_START, &qdev->flags)) {
3783                clear_bit(QL_LINK_MASTER, &qdev->flags);
3784
3785                /*
3786                 * Loop through the active list and free any in-flight skbs.
3787                 */
3788                for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3789                        int j;
3790                        tx_cb = &qdev->tx_buf[i];
3791                        if (tx_cb->skb) {
3792                                printk(KERN_DEBUG PFX
3793                                       "%s: Freeing lost SKB.\n",
3794                                       qdev->ndev->name);
3795                                pci_unmap_single(qdev->pdev,
3796                                         pci_unmap_addr(&tx_cb->map[0], mapaddr),
3797                                         pci_unmap_len(&tx_cb->map[0], maplen),
3798                                         PCI_DMA_TODEVICE);
3799                                for (j = 1; j < tx_cb->seg_count; j++) {
3800                                        pci_unmap_page(qdev->pdev,
3801                                               pci_unmap_addr(&tx_cb->map[j], mapaddr),
3802                                               pci_unmap_len(&tx_cb->map[j], maplen),
3803                                               PCI_DMA_TODEVICE);
3804                                }
3805                                dev_kfree_skb(tx_cb->skb);
3806                                tx_cb->skb = NULL;
3807                        }
3808                }
3809
3810                printk(KERN_ERR PFX
3811                       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3812                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3813                ql_write_common_reg(qdev,
3814                                    &port_regs->CommonRegs.
3815                                    ispControlStatus,
3816                                    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3817                /*
3818                 * Wait for the Soft Reset to complete.
3819                 */
3820                max_wait_time = 10;
3821                do {
3822                        value = ql_read_common_reg(qdev,
3823                                                   &port_regs->CommonRegs.
3824                                                   ispControlStatus);
3826                        if ((value & ISP_CONTROL_SR) == 0) {
3827                                printk(KERN_DEBUG PFX
3828                                       "%s: reset completed.\n",
3829                                       qdev->ndev->name);
3830                                break;
3831                        }
3832
3833                        if (value & ISP_CONTROL_RI) {
3834                                printk(KERN_DEBUG PFX
3835                                       "%s: clearing NRI after reset.\n",
3836                                       qdev->ndev->name);
3837                                ql_write_common_reg(qdev,
3838                                                    &port_regs->
3839                                                    CommonRegs.
3840                                                    ispControlStatus,
3841                                                    ((ISP_CONTROL_RI <<
3842                                                      16) | ISP_CONTROL_RI));
3843                        }
3844
3845                        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3846                        ssleep(1);
3847                        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3848                } while (--max_wait_time);
3849                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3850
3851                if (value & ISP_CONTROL_SR) {
3852
3853                        /*
3854                         * Set the reset flags and clear the board again.
3855                         * Nothing else to do...
3856                         */
3857                        printk(KERN_ERR PFX
3858                               "%s: Timed out waiting for reset to "
3859                               "complete.\n", ndev->name);
3860                        printk(KERN_ERR PFX
3861                               "%s: Do a reset.\n", ndev->name);
3862                        clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3863                        clear_bit(QL_RESET_START, &qdev->flags);
3864                        ql_cycle_adapter(qdev, QL_DO_RESET);
3865                        return;
3866                }
3867
3868                clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3869                clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3870                clear_bit(QL_RESET_START, &qdev->flags);
3871                ql_cycle_adapter(qdev, QL_NO_RESET);
3872        }
3873}
3874
3875static void ql_tx_timeout_work(struct work_struct *work)
3876{
3877        struct ql3_adapter *qdev =
3878                container_of(work, struct ql3_adapter, tx_timeout_work.work);
3879
3880        ql_cycle_adapter(qdev, QL_DO_RESET);
3881}
3882
3883static void ql_get_board_info(struct ql3_adapter *qdev)
3884{
3885        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3886        u32 value;
3887
3888        value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3889
3890        qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3891        if (value & PORT_STATUS_64)
3892                qdev->pci_width = 64;
3893        else
3894                qdev->pci_width = 32;
3895        if (value & PORT_STATUS_X)
3896                qdev->pci_x = 1;
3897        else
3898                qdev->pci_x = 0;
3899        qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3900}
3901
3902static void ql3xxx_timer(unsigned long ptr)
3903{
3904        struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3905        queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3906}
3907
3908static const struct net_device_ops ql3xxx_netdev_ops = {
3909        .ndo_open               = ql3xxx_open,
3910        .ndo_start_xmit         = ql3xxx_send,
3911        .ndo_stop               = ql3xxx_close,
3912        .ndo_set_multicast_list = NULL, /* not allowed on NIC side */
3913        .ndo_change_mtu         = eth_change_mtu,
3914        .ndo_validate_addr      = eth_validate_addr,
3915        .ndo_set_mac_address    = ql3xxx_set_mac_address,
3916        .ndo_tx_timeout         = ql3xxx_tx_timeout,
3917};
3918
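    /*
     * PCI probe: enable the device, choose a 64- or 32-bit DMA mask,
     * map the register BAR, pull the MAC address and MTU from NVRAM,
     * and register the net_device with its workqueue and timer.
     */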
3919static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3920                                  const struct pci_device_id *pci_entry)
3921{
3922        struct net_device *ndev = NULL;
3923        struct ql3_adapter *qdev = NULL;
3924        static int cards_found = 0;
3925        int uninitialized_var(pci_using_dac), err;
3926
3927        err = pci_enable_device(pdev);
3928        if (err) {
3929                printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3930                       pci_name(pdev));
3931                goto err_out;
3932        }
3933
3934        err = pci_request_regions(pdev, DRV_NAME);
3935        if (err) {
3936                printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3937                       pci_name(pdev));
3938                goto err_out_disable_pdev;
3939        }
3940
3941        pci_set_master(pdev);
3942
3943        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3944                pci_using_dac = 1;
3945                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3946        } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
3947                pci_using_dac = 0;
3948                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3949        }
3950
3951        if (err) {
3952                printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3953                       pci_name(pdev));
3954                goto err_out_free_regions;
3955        }
3956
3957        ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3958        if (!ndev) {
3959                printk(KERN_ERR PFX "%s could not alloc etherdev\n",
3960                       pci_name(pdev));
3961                err = -ENOMEM;
3962                goto err_out_free_regions;
3963        }
3964
3965        SET_NETDEV_DEV(ndev, &pdev->dev);
3966
3967        pci_set_drvdata(pdev, ndev);
3968
3969        qdev = netdev_priv(ndev);
3970        qdev->index = cards_found;
3971        qdev->ndev = ndev;
3972        qdev->pdev = pdev;
3973        qdev->device_id = pci_entry->device;
3974        qdev->port_link_state = LS_DOWN;
3975        if (msi)
3976                qdev->msi = 1;
3977
3978        qdev->msg_enable = netif_msg_init(debug, default_msg);
3979
3980        if (pci_using_dac)
3981                ndev->features |= NETIF_F_HIGHDMA;
3982        if (qdev->device_id == QL3032_DEVICE_ID)
3983                ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3984
3985        qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3986        if (!qdev->mem_map_registers) {
3987                printk(KERN_ERR PFX "%s: cannot map device registers\n",
3988                       pci_name(pdev));
3989                err = -EIO;
3990                goto err_out_free_ndev;
3991        }
3992
3993        spin_lock_init(&qdev->adapter_lock);
3994        spin_lock_init(&qdev->hw_lock);
3995
3996        /* Set driver entry points */
3997        ndev->netdev_ops = &ql3xxx_netdev_ops;
3998        SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3999        ndev->watchdog_timeo = 5 * HZ;
4000
4001        netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
4002
4003        ndev->irq = pdev->irq;
4004
4005        /* make sure the EEPROM is good */
4006        if (ql_get_nvram_params(qdev)) {
4007                printk(KERN_ALERT PFX
4008                       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
4009                       qdev->index);
4010                err = -EIO;
4011                goto err_out_iounmap;
4012        }
4013
4014        ql_set_mac_info(qdev);
4015
4016        /* Validate and set parameters */
4017        if (qdev->mac_index) {
4018                ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
4019                ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
4020        } else {
4021                ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
4022                ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
4023        }
4024        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4025
4026        ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
4027
4028        /* Record PCI bus information. */
4029        ql_get_board_info(qdev);
4030
4031        /*
4032         * Set the Maximum Memory Read Byte Count value. We do this to handle
4033         * jumbo frames.
4034         */
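            /*
             * The raw config write below assumes the PCI-X command
             * register sits at offset 0x4e for this device; 0x0036
             * raises the Maximum Memory Read Byte Count.
             */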
4035        if (qdev->pci_x) {
4036                pci_write_config_word(pdev, 0x4e, 0x0036);
4037        }
4038
4039        err = register_netdev(ndev);
4040        if (err) {
4041                printk(KERN_ERR PFX "%s: cannot register net device\n",
4042                       pci_name(pdev));
4043                goto err_out_iounmap;
4044        }
4045
4046        /* we're going to reset, so assume we have no link for now */
4047
4048        netif_carrier_off(ndev);
4049        netif_stop_queue(ndev);
4050
4051        qdev->workqueue = create_singlethread_workqueue(ndev->name);
4052        INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
4053        INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
4054        INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
4055
4056        init_timer(&qdev->adapter_timer);
4057        qdev->adapter_timer.function = ql3xxx_timer;
4058        qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
4059        qdev->adapter_timer.data = (unsigned long)qdev;
4060
4061        if (!cards_found) {
4062                printk(KERN_ALERT PFX "%s\n", DRV_STRING);
4063                printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
4064                   DRV_NAME, DRV_VERSION);
4065        }
4066        ql_display_dev_info(ndev);
4067
4068        cards_found++;
4069        return 0;
4070
4071err_out_iounmap:
4072        iounmap(qdev->mem_map_registers);
4073err_out_free_ndev:
4074        free_netdev(ndev);
4075err_out_free_regions:
4076        pci_release_regions(pdev);
4077err_out_disable_pdev:
4078        pci_disable_device(pdev);
4079        pci_set_drvdata(pdev, NULL);
4080err_out:
4081        return err;
4082}
4083
4084static void __devexit ql3xxx_remove(struct pci_dev *pdev)
4085{
4086        struct net_device *ndev = pci_get_drvdata(pdev);
4087        struct ql3_adapter *qdev = netdev_priv(ndev);
4088
4089        unregister_netdev(ndev);
4091
4092        ql_disable_interrupts(qdev);
4093
4094        if (qdev->workqueue) {
4095                cancel_delayed_work(&qdev->reset_work);
4096                cancel_delayed_work(&qdev->tx_timeout_work);
4097                destroy_workqueue(qdev->workqueue);
4098                qdev->workqueue = NULL;
4099        }
4100
4101        iounmap(qdev->mem_map_registers);
4102        pci_release_regions(pdev);
4103        pci_set_drvdata(pdev, NULL);
4104        free_netdev(ndev);
4105}
4106
4107static struct pci_driver ql3xxx_driver = {
4108
4109        .name = DRV_NAME,
4110        .id_table = ql3xxx_pci_tbl,
4111        .probe = ql3xxx_probe,
4112        .remove = __devexit_p(ql3xxx_remove),
4113};
4114
4115static int __init ql3xxx_init_module(void)
4116{
4117        return pci_register_driver(&ql3xxx_driver);
4118}
4119
4120static void __exit ql3xxx_exit(void)
4121{
4122        pci_unregister_driver(&ql3xxx_driver);
4123}
4124
4125module_init(ql3xxx_init_module);
4126module_exit(ql3xxx_exit);
4127