linux/drivers/net/ethernet/qlogic/qlge/qlge_main.c
   1/*
   2 * QLogic qlge NIC HBA Driver
   3 * Copyright (c)  2003-2008 QLogic Corporation
   4 * See LICENSE.qlge for copyright and licensing details.
   5 * Author:     Linux qlge network device driver by
   6 *                      Ron Mercer <ron.mercer@qlogic.com>
   7 */
   8#include <linux/kernel.h>
   9#include <linux/bitops.h>
  10#include <linux/types.h>
  11#include <linux/module.h>
  12#include <linux/list.h>
  13#include <linux/pci.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/pagemap.h>
  16#include <linux/sched.h>
  17#include <linux/slab.h>
  18#include <linux/dmapool.h>
  19#include <linux/mempool.h>
  20#include <linux/spinlock.h>
  21#include <linux/kthread.h>
  22#include <linux/interrupt.h>
  23#include <linux/errno.h>
  24#include <linux/ioport.h>
  25#include <linux/in.h>
  26#include <linux/ip.h>
  27#include <linux/ipv6.h>
  28#include <net/ipv6.h>
  29#include <linux/tcp.h>
  30#include <linux/udp.h>
  31#include <linux/if_arp.h>
  32#include <linux/if_ether.h>
  33#include <linux/netdevice.h>
  34#include <linux/etherdevice.h>
  35#include <linux/ethtool.h>
  36#include <linux/if_vlan.h>
  37#include <linux/skbuff.h>
  38#include <linux/delay.h>
  39#include <linux/mm.h>
  40#include <linux/vmalloc.h>
  41#include <linux/prefetch.h>
  42#include <net/ip6_checksum.h>
  43
  44#include "qlge.h"
  45
  46char qlge_driver_name[] = DRV_NAME;
  47const char qlge_driver_version[] = DRV_VERSION;
  48
  49MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
  50MODULE_DESCRIPTION(DRV_STRING);
  51MODULE_LICENSE("GPL");
  52MODULE_VERSION(DRV_VERSION);
  53
  54static const u32 default_msg =
  55    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
  56/* NETIF_MSG_TIMER |    */
  57    NETIF_MSG_IFDOWN |
  58    NETIF_MSG_IFUP |
  59    NETIF_MSG_RX_ERR |
  60    NETIF_MSG_TX_ERR |
  61/*  NETIF_MSG_TX_QUEUED | */
  62/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
  63/* NETIF_MSG_PKTDATA | */
  64    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
  65
  66static int debug = -1;  /* defaults above */
  67module_param(debug, int, 0664);
  68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  69
  70#define MSIX_IRQ 0
  71#define MSI_IRQ 1
  72#define LEG_IRQ 2
  73static int qlge_irq_type = MSIX_IRQ;
  74module_param(qlge_irq_type, int, 0664);
  75MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
  76
  77static int qlge_mpi_coredump;
  78module_param(qlge_mpi_coredump, int, 0);
  79MODULE_PARM_DESC(qlge_mpi_coredump,
  80                "Option to enable MPI firmware dump. "
  81                "Default is OFF - Do not allocate memory.");
  82
  83static int qlge_force_coredump;
  84module_param(qlge_force_coredump, int, 0);
  85MODULE_PARM_DESC(qlge_force_coredump,
  86                "Option to allow force of firmware core dump. "
  87                "Default is OFF - Do not allow.");
  88
  89static const struct pci_device_id qlge_pci_tbl[] = {
  90        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
  91        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
  92        /* required last entry */
  93        {0,}
  94};
  95
  96MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
  97
  98static int ql_wol(struct ql_adapter *);
  99static void qlge_set_multicast_list(struct net_device *);
 100static int ql_adapter_down(struct ql_adapter *);
 101static int ql_adapter_up(struct ql_adapter *);
 102
 103/* This hardware semaphore provides exclusive access to
 104 * resources shared between the NIC driver, MPI firmware,
 105 * FCOE firmware and the FC driver.
 106 */
 107static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
 108{
 109        u32 sem_bits = 0;
 110
 111        switch (sem_mask) {
 112        case SEM_XGMAC0_MASK:
 113                sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
 114                break;
 115        case SEM_XGMAC1_MASK:
 116                sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
 117                break;
 118        case SEM_ICB_MASK:
 119                sem_bits = SEM_SET << SEM_ICB_SHIFT;
 120                break;
 121        case SEM_MAC_ADDR_MASK:
 122                sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
 123                break;
 124        case SEM_FLASH_MASK:
 125                sem_bits = SEM_SET << SEM_FLASH_SHIFT;
 126                break;
 127        case SEM_PROBE_MASK:
 128                sem_bits = SEM_SET << SEM_PROBE_SHIFT;
 129                break;
 130        case SEM_RT_IDX_MASK:
 131                sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
 132                break;
 133        case SEM_PROC_REG_MASK:
 134                sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
 135                break;
 136        default:
 137                netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
 138                return -EINVAL;
 139        }
 140
 141        ql_write32(qdev, SEM, sem_bits | sem_mask);
 142        return !(ql_read32(qdev, SEM) & sem_bits);
 143}
 144
 145int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
 146{
 147        unsigned int wait_count = 30;
 148        do {
 149                if (!ql_sem_trylock(qdev, sem_mask))
 150                        return 0;
 151                udelay(100);
 152        } while (--wait_count);
 153        return -ETIMEDOUT;
 154}
 155
 156void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
 157{
 158        ql_write32(qdev, SEM, sem_mask);
 159        ql_read32(qdev, SEM);   /* flush */
 160}
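
    /* A minimal usage sketch for the semaphore helpers above, assuming
     * process context (ql_sem_spinlock() busy-waits with udelay()):
     *
     *        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
     *                return -ETIMEDOUT;
     *        ...access the flash registers...
     *        ql_sem_unlock(qdev, SEM_FLASH_MASK);
     *
     * This mirrors how ql_get_8012_flash_params() below brackets its
     * flash reads; it is an illustration, not an additional API.
     */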
 161
 162/* This function waits for a specific bit to come ready
 163 * in a given register.  It is used mostly by the initialization
 164 * process, but is also used by kernel thread APIs such as
 165 * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
 166 */
 167int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
 168{
 169        u32 temp;
 170        int count = UDELAY_COUNT;
 171
 172        while (count) {
 173                temp = ql_read32(qdev, reg);
 174
 175                /* check for errors */
 176                if (temp & err_bit) {
 177                        netif_alert(qdev, probe, qdev->ndev,
 178                                    "register 0x%.08x access error, value = 0x%.08x!\n",
 179                                    reg, temp);
 180                        return -EIO;
 181                } else if (temp & bit)
 182                        return 0;
 183                udelay(UDELAY_DELAY);
 184                count--;
 185        }
 186        netif_alert(qdev, probe, qdev->ndev,
 187                    "Timed out waiting for reg %x to come ready.\n", reg);
 188        return -ETIMEDOUT;
 189}
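
    /* A hedged example of the polling pattern this helper supports,
     * modeled on the flash read path further down in this file:
     *
     *        status = ql_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY,
     *                                 FLASH_ADDR_ERR);
     *        if (status)
     *                return status;
     *        ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
     *
     * Passing 0 as err_bit skips the error check and only waits for the
     * ready bit to be set.
     */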
 190
 191/* The CFG register is used to download TX and RX control blocks
 192 * to the chip. This function waits for an operation to complete.
 193 */
 194static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
 195{
 196        int count = UDELAY_COUNT;
 197        u32 temp;
 198
 199        while (count) {
 200                temp = ql_read32(qdev, CFG);
 201                if (temp & CFG_LE)
 202                        return -EIO;
 203                if (!(temp & bit))
 204                        return 0;
 205                udelay(UDELAY_DELAY);
 206                count--;
 207        }
 208        return -ETIMEDOUT;
 209}
 210
 211
 212/* Used to issue init control blocks to hw. Maps control block,
 213 * sets address, triggers download, waits for completion.
 214 */
 215int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 216                 u16 q_id)
 217{
 218        u64 map;
 219        int status = 0;
 220        int direction;
 221        u32 mask;
 222        u32 value;
 223
 224        direction =
 225            (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
 226            PCI_DMA_FROMDEVICE;
 227
 228        map = pci_map_single(qdev->pdev, ptr, size, direction);
 229        if (pci_dma_mapping_error(qdev->pdev, map)) {
 230                netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
 231                return -ENOMEM;
 232        }
 233
 234        status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
 235        if (status)
 236                return status;
 237
 238        status = ql_wait_cfg(qdev, bit);
 239        if (status) {
 240                netif_err(qdev, ifup, qdev->ndev,
 241                          "Timed out waiting for CFG to come ready.\n");
 242                goto exit;
 243        }
 244
 245        ql_write32(qdev, ICB_L, (u32) map);
 246        ql_write32(qdev, ICB_H, (u32) (map >> 32));
 247
 248        mask = CFG_Q_MASK | (bit << 16);
 249        value = bit | (q_id << CFG_Q_SHIFT);
 250        ql_write32(qdev, CFG, (mask | value));
 251
 252        /*
 253         * Wait for the bit to clear after signaling hw.
 254         */
 255        status = ql_wait_cfg(qdev, bit);
 256exit:
 257        ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
 258        pci_unmap_single(qdev->pdev, map, size, direction);
 259        return status;
 260}
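
    /* Sketch of a typical ql_write_cfg() call.  The ICB pointer and
     * queue id below are hypothetical placeholders; the real call sites
     * (the rx/tx ring bring-up paths) are outside this excerpt:
     *
     *        err = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ,
     *                           rx_ring->cq_id);
     *
     * Loads flagged CFG_LRQ/CFG_LR/CFG_LCQ are DMA'd to the device,
     * which is why the mapping direction above is derived from the bit
     * being set.
     */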
 261
 262/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
 263int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 264                        u32 *value)
 265{
 266        u32 offset = 0;
 267        int status;
 268
 269        switch (type) {
 270        case MAC_ADDR_TYPE_MULTI_MAC:
 271        case MAC_ADDR_TYPE_CAM_MAC:
 272                {
 273                        status =
 274                            ql_wait_reg_rdy(qdev,
 275                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 276                        if (status)
 277                                goto exit;
 278                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 279                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 280                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 281                        status =
 282                            ql_wait_reg_rdy(qdev,
 283                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 284                        if (status)
 285                                goto exit;
 286                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 287                        status =
 288                            ql_wait_reg_rdy(qdev,
 289                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 290                        if (status)
 291                                goto exit;
 292                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 293                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 294                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 295                        status =
 296                            ql_wait_reg_rdy(qdev,
 297                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 298                        if (status)
 299                                goto exit;
 300                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 301                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
 302                                status =
 303                                    ql_wait_reg_rdy(qdev,
 304                                        MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 305                                if (status)
 306                                        goto exit;
 307                                ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 308                                           (index << MAC_ADDR_IDX_SHIFT) | /* index */
 309                                           MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 310                                status =
 311                                    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
 312                                                    MAC_ADDR_MR, 0);
 313                                if (status)
 314                                        goto exit;
 315                                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 316                        }
 317                        break;
 318                }
 319        case MAC_ADDR_TYPE_VLAN:
 320        case MAC_ADDR_TYPE_MULTI_FLTR:
 321        default:
 322                netif_crit(qdev, ifup, qdev->ndev,
 323                           "Address type %d not yet supported.\n", type);
 324                status = -EPERM;
 325        }
 326exit:
 327        return status;
 328}
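
    /* The access pattern above, summarized: for each 32-bit word of the
     * entry the driver waits for MAC_ADDR_MW, writes MAC_ADDR_IDX with
     * MAC_ADDR_ADR | MAC_ADDR_RS plus the offset/index/type, waits for
     * MAC_ADDR_MR, then reads the word from MAC_ADDR_DATA.  A CAM entry
     * yields three words (lower MAC, upper MAC and the output/routing
     * word); a multicast entry yields two.
     */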
 329
 330/* Set up a MAC, multicast or VLAN address for the
 331 * inbound frame matching.
 332 */
 333static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 334                               u16 index)
 335{
 336        u32 offset = 0;
 337        int status = 0;
 338
 339        switch (type) {
 340        case MAC_ADDR_TYPE_MULTI_MAC:
 341                {
 342                        u32 upper = (addr[0] << 8) | addr[1];
 343                        u32 lower = (addr[2] << 24) | (addr[3] << 16) |
 344                                        (addr[4] << 8) | (addr[5]);
 345
 346                        status =
 347                                ql_wait_reg_rdy(qdev,
 348                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 349                        if (status)
 350                                goto exit;
 351                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
 352                                (index << MAC_ADDR_IDX_SHIFT) |
 353                                type | MAC_ADDR_E);
 354                        ql_write32(qdev, MAC_ADDR_DATA, lower);
 355                        status =
 356                                ql_wait_reg_rdy(qdev,
 357                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 358                        if (status)
 359                                goto exit;
 360                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
 361                                (index << MAC_ADDR_IDX_SHIFT) |
 362                                type | MAC_ADDR_E);
 363
 364                        ql_write32(qdev, MAC_ADDR_DATA, upper);
 365                        status =
 366                                ql_wait_reg_rdy(qdev,
 367                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 368                        if (status)
 369                                goto exit;
 370                        break;
 371                }
 372        case MAC_ADDR_TYPE_CAM_MAC:
 373                {
 374                        u32 cam_output;
 375                        u32 upper = (addr[0] << 8) | addr[1];
 376                        u32 lower =
 377                            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
 378                            (addr[5]);
 379                        status =
 380                            ql_wait_reg_rdy(qdev,
 381                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 382                        if (status)
 383                                goto exit;
 384                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 385                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 386                                   type);       /* type */
 387                        ql_write32(qdev, MAC_ADDR_DATA, lower);
 388                        status =
 389                            ql_wait_reg_rdy(qdev,
 390                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 391                        if (status)
 392                                goto exit;
 393                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 394                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 395                                   type);       /* type */
 396                        ql_write32(qdev, MAC_ADDR_DATA, upper);
 397                        status =
 398                            ql_wait_reg_rdy(qdev,
 399                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 400                        if (status)
 401                                goto exit;
 402                        ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
 403                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
 404                                   type);       /* type */
 405                        /* This field should also include the queue id
 406                         * and possibly the function id.  Right now we
 407                         * hardcode the route field to NIC core.
 408                         */
 409                        cam_output = (CAM_OUT_ROUTE_NIC |
 410                                      (qdev->func <<
 411                                       CAM_OUT_FUNC_SHIFT) |
 412                                      (0 << CAM_OUT_CQ_ID_SHIFT));
 413                        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
 414                                cam_output |= CAM_OUT_RV;
 415                        /* route to NIC core */
 416                        ql_write32(qdev, MAC_ADDR_DATA, cam_output);
 417                        break;
 418                }
 419        case MAC_ADDR_TYPE_VLAN:
 420                {
 421                        u32 enable_bit = *((u32 *) &addr[0]);
 422                        /* For VLAN, the addr actually holds a bit that
 423                         * either enables or disables the vlan id we are
 424                         * addressing. It's either MAC_ADDR_E on or off.
 425                         * That's bit-27 we're talking about.
 426                         */
 427                        status =
 428                            ql_wait_reg_rdy(qdev,
 429                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 430                        if (status)
 431                                goto exit;
 432                        ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
 433                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
 434                                   type |       /* type */
 435                                   enable_bit); /* enable/disable */
 436                        break;
 437                }
 438        case MAC_ADDR_TYPE_MULTI_FLTR:
 439        default:
 440                netif_crit(qdev, ifup, qdev->ndev,
 441                           "Address type %d not yet supported.\n", type);
 442                status = -EPERM;
 443        }
 444exit:
 445        return status;
 446}
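
    /* Usage sketch: ql_set_mac_addr() below programs the station
     * address with
     *
     *        ql_set_mac_addr_reg(qdev, (u8 *) addr, MAC_ADDR_TYPE_CAM_MAC,
     *                            qdev->func * MAX_CQ);
     *
     * while holding SEM_MAC_ADDR_MASK.  The vlan_rx_add_vid path (not
     * shown in this excerpt) would pass MAC_ADDR_TYPE_VLAN with
     * MAC_ADDR_E as the "address" to enable a VLAN id.
     */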
 447
 448/* Set or clear MAC address in hardware. We sometimes
 449 * have to clear it to prevent wrong frame routing
 450 * especially in a bonding environment.
 451 */
 452static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
 453{
 454        int status;
 455        char zero_mac_addr[ETH_ALEN];
 456        char *addr;
 457
 458        if (set) {
 459                addr = &qdev->current_mac_addr[0];
 460                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 461                             "Set Mac addr %pM\n", addr);
 462        } else {
 463                eth_zero_addr(zero_mac_addr);
 464                addr = &zero_mac_addr[0];
 465                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 466                             "Clearing MAC address\n");
 467        }
 468        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 469        if (status)
 470                return status;
 471        status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
 472                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
 473        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 474        if (status)
 475                netif_err(qdev, ifup, qdev->ndev,
 476                          "Failed to init mac address.\n");
 477        return status;
 478}
 479
 480void ql_link_on(struct ql_adapter *qdev)
 481{
 482        netif_err(qdev, link, qdev->ndev, "Link is up.\n");
 483        netif_carrier_on(qdev->ndev);
 484        ql_set_mac_addr(qdev, 1);
 485}
 486
 487void ql_link_off(struct ql_adapter *qdev)
 488{
 489        netif_err(qdev, link, qdev->ndev, "Link is down.\n");
 490        netif_carrier_off(qdev->ndev);
 491        ql_set_mac_addr(qdev, 0);
 492}
 493
 494/* Get a specific frame routing value from the CAM.
 495 * Used for debug and reg dump.
 496 */
 497int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
 498{
 499        int status = 0;
 500
 501        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 502        if (status)
 503                goto exit;
 504
 505        ql_write32(qdev, RT_IDX,
 506                   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
 507        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
 508        if (status)
 509                goto exit;
 510        *value = ql_read32(qdev, RT_DATA);
 511exit:
 512        return status;
 513}
 514
 515/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 516 * to route different frame types to various inbound queues.  We send broadcast/
 517 * multicast/error frames to the default queue for slow handling,
 518 * and CAM hit/RSS frames to the fast handling queues.
 519 */
 520static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
 521                              int enable)
 522{
 523        int status = -EINVAL; /* Return error if no mask match. */
 524        u32 value = 0;
 525
 526        switch (mask) {
 527        case RT_IDX_CAM_HIT:
 528                {
 529                        value = RT_IDX_DST_CAM_Q |      /* dest */
 530                            RT_IDX_TYPE_NICQ |  /* type */
 531                            (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
 532                        break;
 533                }
 534        case RT_IDX_VALID:      /* Promiscuous Mode frames. */
 535                {
 536                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 537                            RT_IDX_TYPE_NICQ |  /* type */
 538                            (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
 539                        break;
 540                }
 541        case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
 542                {
 543                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 544                            RT_IDX_TYPE_NICQ |  /* type */
 545                            (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
 546                        break;
 547                }
 548        case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
 549                {
 550                        value = RT_IDX_DST_DFLT_Q | /* dest */
 551                                RT_IDX_TYPE_NICQ | /* type */
 552                                (RT_IDX_IP_CSUM_ERR_SLOT <<
 553                                RT_IDX_IDX_SHIFT); /* index */
 554                        break;
 555                }
 556        case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
 557                {
 558                        value = RT_IDX_DST_DFLT_Q | /* dest */
 559                                RT_IDX_TYPE_NICQ | /* type */
 560                                (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
 561                                RT_IDX_IDX_SHIFT); /* index */
 562                        break;
 563                }
 564        case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
 565                {
 566                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 567                            RT_IDX_TYPE_NICQ |  /* type */
 568                            (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
 569                        break;
 570                }
 571        case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
 572                {
 573                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 574                            RT_IDX_TYPE_NICQ |  /* type */
 575                            (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
 576                        break;
 577                }
 578        case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
 579                {
 580                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 581                            RT_IDX_TYPE_NICQ |  /* type */
 582                            (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 583                        break;
 584                }
 585        case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
 586                {
 587                        value = RT_IDX_DST_RSS |        /* dest */
 588                            RT_IDX_TYPE_NICQ |  /* type */
 589                            (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 590                        break;
 591                }
 592        case 0:         /* Clear the E-bit on an entry. */
 593                {
 594                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 595                            RT_IDX_TYPE_NICQ |  /* type */
 596                            (index << RT_IDX_IDX_SHIFT);/* index */
 597                        break;
 598                }
 599        default:
 600                netif_err(qdev, ifup, qdev->ndev,
 601                          "Mask type %d not yet supported.\n", mask);
 602                status = -EPERM;
 603                goto exit;
 604        }
 605
 606        if (value) {
 607                status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 608                if (status)
 609                        goto exit;
 610                value |= (enable ? RT_IDX_E : 0);
 611                ql_write32(qdev, RT_IDX, value);
 612                ql_write32(qdev, RT_DATA, enable ? mask : 0);
 613        }
 614exit:
 615        return status;
 616}
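
    /* A hedged example of turning a routing slot on; the actual call
     * sites (route and multicast setup) are outside this excerpt:
     *
     *        status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
     *                                    RT_IDX_BCAST, 1);
     *
     * Passing enable == 0 writes the slot without RT_IDX_E and zeroes
     * RT_DATA, which is how an entry is switched off again.
     */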
 617
 618static void ql_enable_interrupts(struct ql_adapter *qdev)
 619{
 620        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
 621}
 622
 623static void ql_disable_interrupts(struct ql_adapter *qdev)
 624{
 625        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
 626}
 627
 628/* If we're running with multiple MSI-X vectors then we enable on the fly.
 629 * Otherwise, we may have multiple outstanding workers and don't want to
 630 * enable until the last one finishes. In this case, the irq_cnt gets
 631 * incremented every time we queue a worker and decremented every time
 632 * a worker finishes.  Once it hits zero we enable the interrupt.
 633 */
 634u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 635{
 636        u32 var = 0;
 637        unsigned long hw_flags = 0;
 638        struct intr_context *ctx = qdev->intr_context + intr;
 639
 640        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
 641                /* Always enable if we're MSIX multi interrupts and
 642                 * it's not the default (zeroth) interrupt.
 643                 */
 644                ql_write32(qdev, INTR_EN,
 645                           ctx->intr_en_mask);
 646                var = ql_read32(qdev, STS);
 647                return var;
 648        }
 649
 650        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 651        if (atomic_dec_and_test(&ctx->irq_cnt)) {
 652                ql_write32(qdev, INTR_EN,
 653                           ctx->intr_en_mask);
 654                var = ql_read32(qdev, STS);
 655        }
 656        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 657        return var;
 658}
 659
 660static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 661{
 662        u32 var = 0;
 663        struct intr_context *ctx;
 664
 665        /* HW disables for us if we're MSIX multi interrupts and
 666         * it's not the default (zeroth) interrupt.
 667         */
 668        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
 669                return 0;
 670
 671        ctx = qdev->intr_context + intr;
 672        spin_lock(&qdev->hw_lock);
 673        if (!atomic_read(&ctx->irq_cnt)) {
 674                ql_write32(qdev, INTR_EN,
 675                ctx->intr_dis_mask);
 676                var = ql_read32(qdev, STS);
 677        }
 678        atomic_inc(&ctx->irq_cnt);
 679        spin_unlock(&qdev->hw_lock);
 680        return var;
 681}
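
    /* Sketch of the intended pairing around completion processing,
     * assuming a NAPI-style poller (the actual poll routines are
     * further down in the file and not shown in this excerpt):
     *
     *        ql_disable_completion_interrupt(qdev, intr);
     *        ...service the completion queue for this vector...
     *        ql_enable_completion_interrupt(qdev, intr);
     *
     * For MSI/legacy interrupts the disable side bumps irq_cnt and the
     * enable side only re-enables once atomic_dec_and_test() brings it
     * back to zero, so nested disables stay balanced.
     */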
 682
 683static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 684{
 685        int i;
 686        for (i = 0; i < qdev->intr_count; i++) {
 687                /* The enable call does an atomic_dec_and_test
 688                 * and enables only if the result is zero.
 689                 * So we precharge it here.
 690                 */
 691                if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
 692                        i == 0))
 693                        atomic_set(&qdev->intr_context[i].irq_cnt, 1);
 694                ql_enable_completion_interrupt(qdev, i);
 695        }
 696
 697}
 698
 699static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
 700{
 701        int status, i;
 702        u16 csum = 0;
 703        __le16 *flash = (__le16 *)&qdev->flash;
 704
 705        status = strncmp((char *)&qdev->flash, str, 4);
 706        if (status) {
 707                netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
 708                return  status;
 709        }
 710
 711        for (i = 0; i < size; i++)
 712                csum += le16_to_cpu(*flash++);
 713
 714        if (csum)
 715                netif_err(qdev, ifup, qdev->ndev,
 716                          "Invalid flash checksum, csum = 0x%.04x.\n", csum);
 717
 718        return csum;
 719}
 720
 721static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
 722{
 723        int status = 0;
 724        /* wait for reg to come ready */
 725        status = ql_wait_reg_rdy(qdev,
 726                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 727        if (status)
 728                goto exit;
 729        /* set up for reg read */
 730        ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
 731        /* wait for reg to come ready */
 732        status = ql_wait_reg_rdy(qdev,
 733                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 734        if (status)
 735                goto exit;
 736        /* This data is stored on flash as an array of
 737         * __le32.  Since ql_read32() returns cpu endian
 738         * we need to swap it back.
 739         */
 740        *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
 741exit:
 742        return status;
 743}
 744
 745static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 746{
 747        u32 i, size;
 748        int status;
 749        __le32 *p = (__le32 *)&qdev->flash;
 750        u32 offset;
 751        u8 mac_addr[6];
 752
 753        /* Get flash offset for function and adjust
 754         * for dword access.
 755         */
 756        if (!qdev->port)
 757                offset = FUNC0_FLASH_OFFSET / sizeof(u32);
 758        else
 759                offset = FUNC1_FLASH_OFFSET / sizeof(u32);
 760
 761        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 762                return -ETIMEDOUT;
 763
 764        size = sizeof(struct flash_params_8000) / sizeof(u32);
 765        for (i = 0; i < size; i++, p++) {
 766                status = ql_read_flash_word(qdev, i+offset, p);
 767                if (status) {
 768                        netif_err(qdev, ifup, qdev->ndev,
 769                                  "Error reading flash.\n");
 770                        goto exit;
 771                }
 772        }
 773
 774        status = ql_validate_flash(qdev,
 775                        sizeof(struct flash_params_8000) / sizeof(u16),
 776                        "8000");
 777        if (status) {
 778                netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 779                status = -EINVAL;
 780                goto exit;
 781        }
 782
 783        /* Extract either manufacturer or BOFM modified
 784         * MAC address.
 785         */
 786        if (qdev->flash.flash_params_8000.data_type1 == 2)
 787                memcpy(mac_addr,
 788                        qdev->flash.flash_params_8000.mac_addr1,
 789                        qdev->ndev->addr_len);
 790        else
 791                memcpy(mac_addr,
 792                        qdev->flash.flash_params_8000.mac_addr,
 793                        qdev->ndev->addr_len);
 794
 795        if (!is_valid_ether_addr(mac_addr)) {
 796                netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
 797                status = -EINVAL;
 798                goto exit;
 799        }
 800
 801        memcpy(qdev->ndev->dev_addr,
 802                mac_addr,
 803                qdev->ndev->addr_len);
 804
 805exit:
 806        ql_sem_unlock(qdev, SEM_FLASH_MASK);
 807        return status;
 808}
 809
 810static int ql_get_8012_flash_params(struct ql_adapter *qdev)
 811{
 812        int i;
 813        int status;
 814        __le32 *p = (__le32 *)&qdev->flash;
 815        u32 offset = 0;
 816        u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
 817
 818        /* Second function's parameters follow the first
 819         * function's.
 820         */
 821        if (qdev->port)
 822                offset = size;
 823
 824        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 825                return -ETIMEDOUT;
 826
 827        for (i = 0; i < size; i++, p++) {
 828                status = ql_read_flash_word(qdev, i+offset, p);
 829                if (status) {
 830                        netif_err(qdev, ifup, qdev->ndev,
 831                                  "Error reading flash.\n");
 832                        goto exit;
 833                }
 834
 835        }
 836
 837        status = ql_validate_flash(qdev,
 838                        sizeof(struct flash_params_8012) / sizeof(u16),
 839                        "8012");
 840        if (status) {
 841                netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 842                status = -EINVAL;
 843                goto exit;
 844        }
 845
 846        if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
 847                status = -EINVAL;
 848                goto exit;
 849        }
 850
 851        memcpy(qdev->ndev->dev_addr,
 852                qdev->flash.flash_params_8012.mac_addr,
 853                qdev->ndev->addr_len);
 854
 855exit:
 856        ql_sem_unlock(qdev, SEM_FLASH_MASK);
 857        return status;
 858}
 859
 860/* xgmac registers are located behind the xgmac_addr and xgmac_data
 861 * register pair.  Each read/write requires us to wait for the ready
 862 * bit before reading/writing the data.
 863 */
 864static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
 865{
 866        int status;
 867        /* wait for reg to come ready */
 868        status = ql_wait_reg_rdy(qdev,
 869                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 870        if (status)
 871                return status;
 872        /* write the data to the data reg */
 873        ql_write32(qdev, XGMAC_DATA, data);
 874        /* trigger the write */
 875        ql_write32(qdev, XGMAC_ADDR, reg);
 876        return status;
 877}
 878
 879/* xgmac registers are located behind the xgmac_addr and xgmac_data
 880 * register pair.  Each read/write requires us to wait for the ready
 881 * bit before reading/writing the data.
 882 */
 883int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
 884{
 885        int status = 0;
 886        /* wait for reg to come ready */
 887        status = ql_wait_reg_rdy(qdev,
 888                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 889        if (status)
 890                goto exit;
 891        /* set up for reg read */
 892        ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
 893        /* wait for reg to come ready */
 894        status = ql_wait_reg_rdy(qdev,
 895                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 896        if (status)
 897                goto exit;
 898        /* get the data */
 899        *data = ql_read32(qdev, XGMAC_DATA);
 900exit:
 901        return status;
 902}
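
    /* The read-modify-write idiom used with this register pair, as in
     * ql_8012_port_initialize() further down:
     *
     *        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
     *        if (status)
     *                goto end;
     *        data |= GLOBAL_CFG_RESET;
     *        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
     *
     * That function holds the xgmac semaphore (qdev->xg_sem_mask)
     * around these accesses.
     */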
 903
 904/* This is used for reading the 64-bit statistics regs. */
 905int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
 906{
 907        int status = 0;
 908        u32 hi = 0;
 909        u32 lo = 0;
 910
 911        status = ql_read_xgmac_reg(qdev, reg, &lo);
 912        if (status)
 913                goto exit;
 914
 915        status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
 916        if (status)
 917                goto exit;
 918
 919        *data = (u64) lo | ((u64) hi << 32);
 920
 921exit:
 922        return status;
 923}
 924
 925static int ql_8000_port_initialize(struct ql_adapter *qdev)
 926{
 927        int status;
 928        /*
 929         * Get MPI firmware version for driver banner
 930         * and ethtool info.
 931         */
 932        status = ql_mb_about_fw(qdev);
 933        if (status)
 934                goto exit;
 935        status = ql_mb_get_fw_state(qdev);
 936        if (status)
 937                goto exit;
 938        /* Wake up a worker to get/set the TX/RX frame sizes. */
 939        queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
 940exit:
 941        return status;
 942}
 943
 944/* Take the MAC Core out of reset.
 945 * Enable statistics counting.
 946 * Take the transmitter/receiver out of reset.
 947 * This functionality may be done in the MPI firmware at a
 948 * later date.
 949 */
 950static int ql_8012_port_initialize(struct ql_adapter *qdev)
 951{
 952        int status = 0;
 953        u32 data;
 954
 955        if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
 956                /* Another function has the semaphore, so
 957                 * wait for the port init bit to come ready.
 958                 */
 959                netif_info(qdev, link, qdev->ndev,
 960                           "Another function has the semaphore, so wait for the port init bit to come ready.\n");
 961                status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
 962                if (status) {
 963                        netif_crit(qdev, link, qdev->ndev,
 964                                   "Port initialize timed out.\n");
 965                }
 966                return status;
 967        }
 968
 969        netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
 970        /* Set the core reset. */
 971        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
 972        if (status)
 973                goto end;
 974        data |= GLOBAL_CFG_RESET;
 975        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 976        if (status)
 977                goto end;
 978
 979        /* Clear the core reset and turn on jumbo for receiver. */
 980        data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
 981        data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
 982        data |= GLOBAL_CFG_TX_STAT_EN;
 983        data |= GLOBAL_CFG_RX_STAT_EN;
 984        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 985        if (status)
 986                goto end;
 987
 988        /* Enable the transmitter and clear its reset. */
 989        status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
 990        if (status)
 991                goto end;
 992        data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
 993        data |= TX_CFG_EN;      /* Enable the transmitter. */
 994        status = ql_write_xgmac_reg(qdev, TX_CFG, data);
 995        if (status)
 996                goto end;
 997
 998        /* Enable the receiver and clear its reset. */
 999        status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1000        if (status)
1001                goto end;
1002        data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1003        data |= RX_CFG_EN;      /* Enable the receiver. */
1004        status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1005        if (status)
1006                goto end;
1007
1008        /* Turn on jumbo. */
1009        status =
1010            ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1011        if (status)
1012                goto end;
1013        status =
1014            ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1015        if (status)
1016                goto end;
1017
1018        /* Signal to the world that the port is enabled. */
1019        ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020end:
1021        ql_sem_unlock(qdev, qdev->xg_sem_mask);
1022        return status;
1023}
1024
1025static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026{
1027        return PAGE_SIZE << qdev->lbq_buf_order;
1028}
1029
1030/* Get the next large buffer. */
1031static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032{
1033        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034        rx_ring->lbq_curr_idx++;
1035        if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036                rx_ring->lbq_curr_idx = 0;
1037        rx_ring->lbq_free_cnt++;
1038        return lbq_desc;
1039}
1040
1041static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042                struct rx_ring *rx_ring)
1043{
1044        struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045
1046        pci_dma_sync_single_for_cpu(qdev->pdev,
1047                                        dma_unmap_addr(lbq_desc, mapaddr),
1048                                    rx_ring->lbq_buf_size,
1049                                        PCI_DMA_FROMDEVICE);
1050
1051        /* If it's the last chunk of our master page then
1052         * we unmap it.
1053         */
1054        if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055                                        == ql_lbq_block_size(qdev))
1056                pci_unmap_page(qdev->pdev,
1057                                lbq_desc->p.pg_chunk.map,
1058                                ql_lbq_block_size(qdev),
1059                                PCI_DMA_FROMDEVICE);
1060        return lbq_desc;
1061}
1062
1063/* Get the next small buffer. */
1064static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065{
1066        struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067        rx_ring->sbq_curr_idx++;
1068        if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069                rx_ring->sbq_curr_idx = 0;
1070        rx_ring->sbq_free_cnt++;
1071        return sbq_desc;
1072}
1073
1074/* Update an rx ring index. */
1075static void ql_update_cq(struct rx_ring *rx_ring)
1076{
1077        rx_ring->cnsmr_idx++;
1078        rx_ring->curr_entry++;
1079        if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080                rx_ring->cnsmr_idx = 0;
1081                rx_ring->curr_entry = rx_ring->cq_base;
1082        }
1083}
1084
1085static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086{
1087        ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1088}
1089
1090static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091                                                struct bq_desc *lbq_desc)
1092{
1093        if (!rx_ring->pg_chunk.page) {
1094                u64 map;
1095                rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1096                                                GFP_ATOMIC,
1097                                                qdev->lbq_buf_order);
1098                if (unlikely(!rx_ring->pg_chunk.page)) {
1099                        netif_err(qdev, drv, qdev->ndev,
1100                                  "page allocation failed.\n");
1101                        return -ENOMEM;
1102                }
1103                rx_ring->pg_chunk.offset = 0;
1104                map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1105                                        0, ql_lbq_block_size(qdev),
1106                                        PCI_DMA_FROMDEVICE);
1107                if (pci_dma_mapping_error(qdev->pdev, map)) {
1108                        __free_pages(rx_ring->pg_chunk.page,
1109                                        qdev->lbq_buf_order);
1110                        rx_ring->pg_chunk.page = NULL;
1111                        netif_err(qdev, drv, qdev->ndev,
1112                                  "PCI mapping failed.\n");
1113                        return -ENOMEM;
1114                }
1115                rx_ring->pg_chunk.map = map;
1116                rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1117        }
1118
1119        /* Copy the current master pg_chunk info
1120         * to the current descriptor.
1121         */
1122        lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1123
1124        /* Adjust the master page chunk for next
1125         * buffer get.
1126         */
1127        rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1128        if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1129                rx_ring->pg_chunk.page = NULL;
1130                lbq_desc->p.pg_chunk.last_flag = 1;
1131        } else {
1132                rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1133                get_page(rx_ring->pg_chunk.page);
1134                lbq_desc->p.pg_chunk.last_flag = 0;
1135        }
1136        return 0;
1137}
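
    /* Large-buffer chunking, summarized: one "master" page of
     * PAGE_SIZE << lbq_buf_order bytes is carved into lbq_buf_size
     * chunks.  get_page() takes an extra reference for every chunk
     * except the last, so the page is not freed until the receive path
     * has consumed every chunk, and ql_get_curr_lchunk() unmaps the
     * page only when the final chunk of the block is handed up.
     */
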
1138/* Process (refill) a large buffer queue. */
1139static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1140{
1141        u32 clean_idx = rx_ring->lbq_clean_idx;
1142        u32 start_idx = clean_idx;
1143        struct bq_desc *lbq_desc;
1144        u64 map;
1145        int i;
1146
1147        while (rx_ring->lbq_free_cnt > 32) {
1148                for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1149                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1150                                     "lbq: try cleaning clean_idx = %d.\n",
1151                                     clean_idx);
1152                        lbq_desc = &rx_ring->lbq[clean_idx];
1153                        if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1154                                rx_ring->lbq_clean_idx = clean_idx;
1155                                netif_err(qdev, ifup, qdev->ndev,
1156                                                "Could not get a page chunk, i=%d, clean_idx = %d.\n",
1157                                                i, clean_idx);
1158                                return;
1159                        }
1160
1161                        map = lbq_desc->p.pg_chunk.map +
1162                                lbq_desc->p.pg_chunk.offset;
1163                        dma_unmap_addr_set(lbq_desc, mapaddr, map);
1164                        dma_unmap_len_set(lbq_desc, maplen,
1165                                        rx_ring->lbq_buf_size);
1166                        *lbq_desc->addr = cpu_to_le64(map);
1167
1168                        pci_dma_sync_single_for_device(qdev->pdev, map,
1169                                                rx_ring->lbq_buf_size,
1170                                                PCI_DMA_FROMDEVICE);
1171                        clean_idx++;
1172                        if (clean_idx == rx_ring->lbq_len)
1173                                clean_idx = 0;
1174                }
1175
1176                rx_ring->lbq_clean_idx = clean_idx;
1177                rx_ring->lbq_prod_idx += 16;
1178                if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179                        rx_ring->lbq_prod_idx = 0;
1180                rx_ring->lbq_free_cnt -= 16;
1181        }
1182
1183        if (start_idx != clean_idx) {
1184                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1185                             "lbq: updating prod idx = %d.\n",
1186                             rx_ring->lbq_prod_idx);
1187                ql_write_db_reg(rx_ring->lbq_prod_idx,
1188                                rx_ring->lbq_prod_idx_db_reg);
1189        }
1190}
1191
1192/* Process (refill) a small buffer queue. */
1193static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194{
1195        u32 clean_idx = rx_ring->sbq_clean_idx;
1196        u32 start_idx = clean_idx;
1197        struct bq_desc *sbq_desc;
1198        u64 map;
1199        int i;
1200
1201        while (rx_ring->sbq_free_cnt > 16) {
1202                for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1203                        sbq_desc = &rx_ring->sbq[clean_idx];
1204                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1205                                     "sbq: try cleaning clean_idx = %d.\n",
1206                                     clean_idx);
1207                        if (sbq_desc->p.skb == NULL) {
1208                                netif_printk(qdev, rx_status, KERN_DEBUG,
1209                                             qdev->ndev,
1210                                             "sbq: getting new skb for index %d.\n",
1211                                             sbq_desc->index);
1212                                sbq_desc->p.skb =
1213                                    netdev_alloc_skb(qdev->ndev,
1214                                                     SMALL_BUFFER_SIZE);
1215                                if (sbq_desc->p.skb == NULL) {
1216                                        rx_ring->sbq_clean_idx = clean_idx;
1217                                        return;
1218                                }
1219                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220                                map = pci_map_single(qdev->pdev,
1221                                                     sbq_desc->p.skb->data,
1222                                                     rx_ring->sbq_buf_size,
1223                                                     PCI_DMA_FROMDEVICE);
1224                                if (pci_dma_mapping_error(qdev->pdev, map)) {
1225                                        netif_err(qdev, ifup, qdev->ndev,
1226                                                  "PCI mapping failed.\n");
1227                                        rx_ring->sbq_clean_idx = clean_idx;
1228                                        dev_kfree_skb_any(sbq_desc->p.skb);
1229                                        sbq_desc->p.skb = NULL;
1230                                        return;
1231                                }
1232                                dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233                                dma_unmap_len_set(sbq_desc, maplen,
1234                                                  rx_ring->sbq_buf_size);
1235                                *sbq_desc->addr = cpu_to_le64(map);
1236                        }
1237
1238                        clean_idx++;
1239                        if (clean_idx == rx_ring->sbq_len)
1240                                clean_idx = 0;
1241                }
1242                rx_ring->sbq_clean_idx = clean_idx;
1243                rx_ring->sbq_prod_idx += 16;
1244                if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245                        rx_ring->sbq_prod_idx = 0;
1246                rx_ring->sbq_free_cnt -= 16;
1247        }
1248
1249        if (start_idx != clean_idx) {
1250                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251                             "sbq: updating prod idx = %d.\n",
1252                             rx_ring->sbq_prod_idx);
1253                ql_write_db_reg(rx_ring->sbq_prod_idx,
1254                                rx_ring->sbq_prod_idx_db_reg);
1255        }
1256}
1257
1258static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259                                    struct rx_ring *rx_ring)
1260{
1261        ql_update_sbq(qdev, rx_ring);
1262        ql_update_lbq(qdev, rx_ring);
1263}
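
    /* Refill behaviour, summarized: both queues are topped up in
     * batches of 16 descriptors, and only once enough free slots have
     * accumulated (more than 32 for the lbq, more than 16 for the sbq).
     * Each routine writes its producer-index doorbell once at the end,
     * rather than once per descriptor.
     */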
1264
1265/* Unmaps tx buffers.  Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1267 */
1268static void ql_unmap_send(struct ql_adapter *qdev,
1269                          struct tx_ring_desc *tx_ring_desc, int mapped)
1270{
1271        int i;
1272        for (i = 0; i < mapped; i++) {
1273                if (i == 0 || (i == 7 && mapped > 7)) {
1274                        /*
1275                         * Unmap the skb->data area, or the
1276                         * external sglist (AKA the Outbound
1277                         * Address List (OAL)).
1278                         * If it's the zeroth element, then it's
1279                         * the skb->data area.  If it's the 7th
1280                         * element and there are more than 6 frags,
1281                         * then it's an OAL.
1282                         */
1283                        if (i == 7) {
1284                                netif_printk(qdev, tx_done, KERN_DEBUG,
1285                                             qdev->ndev,
1286                                             "unmapping OAL area.\n");
1287                        }
1288                        pci_unmap_single(qdev->pdev,
1289                                         dma_unmap_addr(&tx_ring_desc->map[i],
1290                                                        mapaddr),
1291                                         dma_unmap_len(&tx_ring_desc->map[i],
1292                                                       maplen),
1293                                         PCI_DMA_TODEVICE);
1294                } else {
1295                        netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296                                     "unmapping frag %d.\n", i);
1297                        pci_unmap_page(qdev->pdev,
1298                                       dma_unmap_addr(&tx_ring_desc->map[i],
1299                                                      mapaddr),
1300                                       dma_unmap_len(&tx_ring_desc->map[i],
1301                                                     maplen), PCI_DMA_TODEVICE);
1302                }
1303        }
1304
1305}
1306
1307/* Map the buffers for this transmit.  This will return
1308 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309 */
1310static int ql_map_send(struct ql_adapter *qdev,
1311                       struct ob_mac_iocb_req *mac_iocb_ptr,
1312                       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313{
1314        int len = skb_headlen(skb);
1315        dma_addr_t map;
1316        int frag_idx, err, map_idx = 0;
1317        struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318        int frag_cnt = skb_shinfo(skb)->nr_frags;
1319
1320        if (frag_cnt) {
1321                netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322                             "frag_cnt = %d.\n", frag_cnt);
1323        }
1324        /*
1325         * Map the skb buffer first.
1326         */
1327        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328
1329        err = pci_dma_mapping_error(qdev->pdev, map);
1330        if (err) {
1331                netif_err(qdev, tx_queued, qdev->ndev,
1332                          "PCI mapping failed with error: %d\n", err);
1333
1334                return NETDEV_TX_BUSY;
1335        }
1336
1337        tbd->len = cpu_to_le32(len);
1338        tbd->addr = cpu_to_le64(map);
1339        dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340        dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341        map_idx++;
1342
1343        /*
1344         * This loop fills the remainder of the 8 address descriptors
1345         * in the IOCB.  If there are more than 7 fragments, then the
1346         * eighth address desc will point to an external list (OAL).
1347         * When this happens, the remainder of the frags will be stored
1348         * in this list.
1349         */
1350        for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351                skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352                tbd++;
1353                if (frag_idx == 6 && frag_cnt > 7) {
1354                        /* Let's tack on an sglist.
1355                         * Our control block will now
1356                         * look like this:
1357                         * iocb->seg[0] = skb->data
1358                         * iocb->seg[1] = frag[0]
1359                         * iocb->seg[2] = frag[1]
1360                         * iocb->seg[3] = frag[2]
1361                         * iocb->seg[4] = frag[3]
1362                         * iocb->seg[5] = frag[4]
1363                         * iocb->seg[6] = frag[5]
1364                         * iocb->seg[7] = ptr to OAL (external sglist)
1365                         * oal->seg[0] = frag[6]
1366                         * oal->seg[1] = frag[7]
1367                         * oal->seg[2] = frag[8]
1368                         * oal->seg[3] = frag[9]
1369                         * oal->seg[4] = frag[10]
1370                         *      etc...
1371                         */
1372                        /* Tack on the OAL in the eighth segment of IOCB. */
1373                        map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374                                             sizeof(struct oal),
1375                                             PCI_DMA_TODEVICE);
1376                        err = pci_dma_mapping_error(qdev->pdev, map);
1377                        if (err) {
1378                                netif_err(qdev, tx_queued, qdev->ndev,
1379                                          "PCI mapping outbound address list with error: %d\n",
1380                                          err);
1381                                goto map_error;
1382                        }
1383
1384                        tbd->addr = cpu_to_le64(map);
1385                        /*
1386                         * The length is the number of fragments
1387                         * that remain to be mapped times the size of
1388                         * one OAL entry (struct tx_buf_desc).
1389                         */
1390                        tbd->len =
1391                            cpu_to_le32((sizeof(struct tx_buf_desc) *
1392                                         (frag_cnt - frag_idx)) | TX_DESC_C);
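                            /*
                             * Illustrative example: with frag_cnt == 10 this
                             * branch triggers at frag_idx == 6, so four
                             * fragments remain and the OAL length covers four
                             * tx_buf_desc entries (TX_DESC_C appears to mark
                             * the continuation into the external list).
                             */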
1393                        dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394                                           map);
1395                        dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396                                          sizeof(struct oal));
1397                        tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398                        map_idx++;
1399                }
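                    /*
                     * From here on tbd walks the OAL entries.  The extra
                     * map_idx slot recorded above covers the OAL mapping
                     * itself, so ql_unmap_send() can release it later.
                     */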
1400
1401                map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402                                       DMA_TO_DEVICE);
1403
1404                err = dma_mapping_error(&qdev->pdev->dev, map);
1405                if (err) {
1406                        netif_err(qdev, tx_queued, qdev->ndev,
1407                                  "PCI mapping frags failed with error: %d.\n",
1408                                  err);
1409                        goto map_error;
1410                }
1411
1412                tbd->addr = cpu_to_le64(map);
1413                tbd->len = cpu_to_le32(skb_frag_size(frag));
1414                dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415                dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416                                  skb_frag_size(frag));
1417
1418        }
1419        /* Save the number of segments we've mapped. */
1420        tx_ring_desc->map_cnt = map_idx;
1421        /* Terminate the last segment. */
1422        tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423        return NETDEV_TX_OK;
1424
1425map_error:
1426        /*
1427         * If the first frag mapping failed, then map_idx is one (just
1428         * the skb->data mapping), so only the skb->data area gets
1429         * unmapped.  Otherwise we pass in the number of segments that
1430         * mapped successfully so they can be unmapped.
1431         */
1432        ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433        return NETDEV_TX_BUSY;
1434}
1435
1436/* Categorizing receive firmware frame errors */
1437static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438                                 struct rx_ring *rx_ring)
1439{
1440        struct nic_stats *stats = &qdev->nic_stats;
1441
1442        stats->rx_err_count++;
1443        rx_ring->rx_errors++;
1444
1445        switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446        case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447                stats->rx_code_err++;
1448                break;
1449        case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450                stats->rx_oversize_err++;
1451                break;
1452        case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453                stats->rx_undersize_err++;
1454                break;
1455        case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456                stats->rx_preamble_err++;
1457                break;
1458        case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459                stats->rx_frame_len_err++;
1460                break;
1461        case IB_MAC_IOCB_RSP_ERR_CRC:
1462                stats->rx_crc_err++;
                    break;
1463        default:
1464                break;
1465        }
1466}
1467
1468/**
1469 * ql_update_mac_hdr_len - helper routine to update the mac header length
1470 * based on vlan tags if present
1471 */
1472static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1473                                  struct ib_mac_iocb_rsp *ib_mac_rsp,
1474                                  void *page, size_t *len)
1475{
1476        __be16 *tags;
1477
1478        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1479                return;
1480        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1481                tags = (__be16 *)page;
1482                /* Look for stacked vlan tags in ethertype field */
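                    /* With a plain Ethernet header tags[6] is the ethertype
                     * at byte offset 12; for a stacked (QinQ) frame the inner
                     * ethertype sits at offset 16, i.e. tags[8].
                     */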
1483                if (tags[6] == htons(ETH_P_8021Q) &&
1484                    tags[8] == htons(ETH_P_8021Q))
1485                        *len += 2 * VLAN_HLEN;
1486                else
1487                        *len += VLAN_HLEN;
1488        }
1489}
1490
1491/* Process an inbound completion from an rx ring. */
1492static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1493                                        struct rx_ring *rx_ring,
1494                                        struct ib_mac_iocb_rsp *ib_mac_rsp,
1495                                        u32 length,
1496                                        u16 vlan_id)
1497{
1498        struct sk_buff *skb;
1499        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1500        struct napi_struct *napi = &rx_ring->napi;
1501
1502        /* Frame error, so drop the packet. */
1503        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1504                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1505                put_page(lbq_desc->p.pg_chunk.page);
1506                return;
1507        }
1508        napi->dev = qdev->ndev;
1509
1510        skb = napi_get_frags(napi);
1511        if (!skb) {
1512                netif_err(qdev, drv, qdev->ndev,
1513                          "Couldn't get an skb, exiting.\n");
1514                rx_ring->rx_dropped++;
1515                put_page(lbq_desc->p.pg_chunk.page);
1516                return;
1517        }
1518        prefetch(lbq_desc->p.pg_chunk.va);
1519        __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1520                             lbq_desc->p.pg_chunk.page,
1521                             lbq_desc->p.pg_chunk.offset,
1522                             length);
1523
1524        skb->len += length;
1525        skb->data_len += length;
1526        skb->truesize += length;
1527        skb_shinfo(skb)->nr_frags++;
1528
1529        rx_ring->rx_packets++;
1530        rx_ring->rx_bytes += length;
1531        skb->ip_summed = CHECKSUM_UNNECESSARY;
1532        skb_record_rx_queue(skb, rx_ring->cq_id);
1533        if (vlan_id != 0xffff)
1534                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
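            /* Hand the page fragment to GRO.  napi_gro_frags() consumes the
             * skb obtained from napi_get_frags() above, so the payload goes
             * up the stack without another copy.
             */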
1535        napi_gro_frags(napi);
1536}
1537
1538/* Process an inbound completion from an rx ring. */
1539static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540                                        struct rx_ring *rx_ring,
1541                                        struct ib_mac_iocb_rsp *ib_mac_rsp,
1542                                        u32 length,
1543                                        u16 vlan_id)
1544{
1545        struct net_device *ndev = qdev->ndev;
1546        struct sk_buff *skb = NULL;
1547        void *addr;
1548        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1549        struct napi_struct *napi = &rx_ring->napi;
1550        size_t hlen = ETH_HLEN;
1551
1552        skb = netdev_alloc_skb(ndev, length);
1553        if (!skb) {
1554                rx_ring->rx_dropped++;
1555                put_page(lbq_desc->p.pg_chunk.page);
1556                return;
1557        }
1558
1559        addr = lbq_desc->p.pg_chunk.va;
1560        prefetch(addr);
1561
1562        /* Frame error, so drop the packet. */
1563        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1564                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1565                goto err_out;
1566        }
1567
1568        /* Update the MAC header length */
1569        ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1570
1571        /* The max framesize filter on this chip is set higher than
1572         * MTU since FCoE uses 2k frames.
1573         */
1574        if (length > ndev->mtu + hlen) {
1575                netif_err(qdev, drv, qdev->ndev,
1576                          "Frame too long, dropping.\n");
1577                rx_ring->rx_dropped++;
1578                goto err_out;
1579        }
1580        memcpy(skb_put(skb, hlen), addr, hlen);
1581        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1582                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1583                     length);
1584        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1585                                lbq_desc->p.pg_chunk.offset + hlen,
1586                                length - hlen);
1587        skb->len += length - hlen;
1588        skb->data_len += length - hlen;
1589        skb->truesize += length - hlen;
1590
1591        rx_ring->rx_packets++;
1592        rx_ring->rx_bytes += skb->len;
1593        skb->protocol = eth_type_trans(skb, ndev);
1594        skb_checksum_none_assert(skb);
1595
1596        if ((ndev->features & NETIF_F_RXCSUM) &&
1597                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1598                /* TCP frame. */
1599                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1600                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1601                                     "TCP checksum done!\n");
1602                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1603                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1604                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1605                        /* Unfragmented ipv4 UDP frame. */
1606                        struct iphdr *iph =
1607                                (struct iphdr *)((u8 *)addr + hlen);
1608                        if (!(iph->frag_off &
1609                                htons(IP_MF|IP_OFFSET))) {
1610                                skb->ip_summed = CHECKSUM_UNNECESSARY;
1611                                netif_printk(qdev, rx_status, KERN_DEBUG,
1612                                             qdev->ndev,
1613                                             "UDP checksum done!\n");
1614                        }
1615                }
1616        }
1617
1618        skb_record_rx_queue(skb, rx_ring->cq_id);
1619        if (vlan_id != 0xffff)
1620                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1621        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1622                napi_gro_receive(napi, skb);
1623        else
1624                netif_receive_skb(skb);
1625        return;
1626err_out:
1627        dev_kfree_skb_any(skb);
1628        put_page(lbq_desc->p.pg_chunk.page);
1629}
1630
1631/* Process an inbound completion from an rx ring. */
1632static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1633                                        struct rx_ring *rx_ring,
1634                                        struct ib_mac_iocb_rsp *ib_mac_rsp,
1635                                        u32 length,
1636                                        u16 vlan_id)
1637{
1638        struct net_device *ndev = qdev->ndev;
1639        struct sk_buff *skb = NULL;
1640        struct sk_buff *new_skb = NULL;
1641        struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1642
1643        skb = sbq_desc->p.skb;
1644        /* Allocate new_skb and copy */
1645        new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1646        if (new_skb == NULL) {
1647                rx_ring->rx_dropped++;
1648                return;
1649        }
1650        skb_reserve(new_skb, NET_IP_ALIGN);
1651
1652        pci_dma_sync_single_for_cpu(qdev->pdev,
1653                                    dma_unmap_addr(sbq_desc, mapaddr),
1654                                    dma_unmap_len(sbq_desc, maplen),
1655                                    PCI_DMA_FROMDEVICE);
1656
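            /* Copy the received bytes into the new skb.  The small-buffer
             * skb is only synced, never unmapped, so it stays in the ring
             * and can be reused by the hardware.
             */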
1657        memcpy(skb_put(new_skb, length), skb->data, length);
1658
1659        pci_dma_sync_single_for_device(qdev->pdev,
1660                                       dma_unmap_addr(sbq_desc, mapaddr),
1661                                       dma_unmap_len(sbq_desc, maplen),
1662                                       PCI_DMA_FROMDEVICE);
1663        skb = new_skb;
1664
1665        /* Frame error, so drop the packet. */
1666        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1667                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1668                dev_kfree_skb_any(skb);
1669                return;
1670        }
1671
1672        /* loopback self test for ethtool */
1673        if (test_bit(QL_SELFTEST, &qdev->flags)) {
1674                ql_check_lb_frame(qdev, skb);
1675                dev_kfree_skb_any(skb);
1676                return;
1677        }
1678
1679        /* The max framesize filter on this chip is set higher than
1680         * MTU since FCoE uses 2k frames.
1681         */
1682        if (skb->len > ndev->mtu + ETH_HLEN) {
1683                dev_kfree_skb_any(skb);
1684                rx_ring->rx_dropped++;
1685                return;
1686        }
1687
1688        prefetch(skb->data);
1689        if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1690                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1691                             "%s Multicast.\n",
1692                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1693                             IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1694                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1695                             IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1696                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1697                             IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1698        }
1699        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1700                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1701                             "Promiscuous Packet.\n");
1702
1703        rx_ring->rx_packets++;
1704        rx_ring->rx_bytes += skb->len;
1705        skb->protocol = eth_type_trans(skb, ndev);
1706        skb_checksum_none_assert(skb);
1707
1708        /* If rx checksum is on, and there are no
1709         * csum or frame errors.
1710         */
1711        if ((ndev->features & NETIF_F_RXCSUM) &&
1712                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1713                /* TCP frame. */
1714                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1715                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1716                                     "TCP checksum done!\n");
1717                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1718                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1719                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1720                        /* Unfragmented ipv4 UDP frame. */
1721                        struct iphdr *iph = (struct iphdr *) skb->data;
1722                        if (!(iph->frag_off &
1723                                htons(IP_MF|IP_OFFSET))) {
1724                                skb->ip_summed = CHECKSUM_UNNECESSARY;
1725                                netif_printk(qdev, rx_status, KERN_DEBUG,
1726                                             qdev->ndev,
1727                                             "UDP checksum done!\n");
1728                        }
1729                }
1730        }
1731
1732        skb_record_rx_queue(skb, rx_ring->cq_id);
1733        if (vlan_id != 0xffff)
1734                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1735        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1736                napi_gro_receive(&rx_ring->napi, skb);
1737        else
1738                netif_receive_skb(skb);
1739}
1740
1741static void ql_realign_skb(struct sk_buff *skb, int len)
1742{
1743        void *temp_addr = skb->data;
1744
1745        /* Undo the skb_reserve(skb,32) we did before
1746         * giving to hardware, and realign data on
1747         * a 2-byte boundary.
1748         */
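            /* Illustrative arithmetic: with QLGE_SB_PAD == 32 and
             * NET_IP_ALIGN == 2 (the typical values), data and tail move
             * back 30 bytes, leaving the IP header 2-byte aligned again.
             */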
1749        skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1750        skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1751        skb_copy_to_linear_data(skb, temp_addr,
1752                (unsigned int)len);
1753}
1754
1755/*
1756 * This function builds an skb for the given inbound
1757 * completion.  It will be rewritten for readability in the near
1758 * future, but for now it works well.
1759 */
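    /*
     * Roughly, the cases handled below are: header split into a small
     * buffer (HV/HS); data small enough for a small buffer (DS); data in
     * a single large buffer (DL), chained or copied depending on whether
     * the header was split; and data spread across a chain of large
     * buffers described by an sg list held in a small buffer.
     */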
1760static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1761                                       struct rx_ring *rx_ring,
1762                                       struct ib_mac_iocb_rsp *ib_mac_rsp)
1763{
1764        struct bq_desc *lbq_desc;
1765        struct bq_desc *sbq_desc;
1766        struct sk_buff *skb = NULL;
1767        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1768        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1769        size_t hlen = ETH_HLEN;
1770
1771        /*
1772         * Handle the header buffer if present.
1773         */
1774        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1775            ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1776                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1777                             "Header of %d bytes in small buffer.\n", hdr_len);
1778                /*
1779                 * Headers fit nicely into a small buffer.
1780                 */
1781                sbq_desc = ql_get_curr_sbuf(rx_ring);
1782                pci_unmap_single(qdev->pdev,
1783                                dma_unmap_addr(sbq_desc, mapaddr),
1784                                dma_unmap_len(sbq_desc, maplen),
1785                                PCI_DMA_FROMDEVICE);
1786                skb = sbq_desc->p.skb;
1787                ql_realign_skb(skb, hdr_len);
1788                skb_put(skb, hdr_len);
1789                sbq_desc->p.skb = NULL;
1790        }
1791
1792        /*
1793         * Handle the data buffer(s).
1794         */
1795        if (unlikely(!length)) {        /* Is there data too? */
1796                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797                             "No Data buffer in this packet.\n");
1798                return skb;
1799        }
1800
1801        if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1802                if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1803                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1804                                     "Headers in small, data of %d bytes in small, combine them.\n",
1805                                     length);
1806                        /*
1807                         * Data is less than small buffer size so it's
1808                         * stuffed in a small buffer.
1809                         * For this case we append the data
1810                         * from the "data" small buffer to the "header" small
1811                         * buffer.
1812                         */
1813                        sbq_desc = ql_get_curr_sbuf(rx_ring);
1814                        pci_dma_sync_single_for_cpu(qdev->pdev,
1815                                                    dma_unmap_addr
1816                                                    (sbq_desc, mapaddr),
1817                                                    dma_unmap_len
1818                                                    (sbq_desc, maplen),
1819                                                    PCI_DMA_FROMDEVICE);
1820                        memcpy(skb_put(skb, length),
1821                               sbq_desc->p.skb->data, length);
1822                        pci_dma_sync_single_for_device(qdev->pdev,
1823                                                       dma_unmap_addr
1824                                                       (sbq_desc,
1825                                                        mapaddr),
1826                                                       dma_unmap_len
1827                                                       (sbq_desc,
1828                                                        maplen),
1829                                                       PCI_DMA_FROMDEVICE);
1830                } else {
1831                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1832                                     "%d bytes in a single small buffer.\n",
1833                                     length);
1834                        sbq_desc = ql_get_curr_sbuf(rx_ring);
1835                        skb = sbq_desc->p.skb;
1836                        ql_realign_skb(skb, length);
1837                        skb_put(skb, length);
1838                        pci_unmap_single(qdev->pdev,
1839                                         dma_unmap_addr(sbq_desc,
1840                                                        mapaddr),
1841                                         dma_unmap_len(sbq_desc,
1842                                                       maplen),
1843                                         PCI_DMA_FROMDEVICE);
1844                        sbq_desc->p.skb = NULL;
1845                }
1846        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1847                if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1848                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1849                                     "Header in small, %d bytes in large. Chain large to small!\n",
1850                                     length);
1851                        /*
1852                         * The data is in a single large buffer.  We
1853                         * chain it to the header buffer's skb and let
1854                         * it rip.
1855                         */
1856                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1857                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1858                                     "Chaining page at offset = %d, for %d bytes  to skb.\n",
1859                                     lbq_desc->p.pg_chunk.offset, length);
1860                        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1861                                                lbq_desc->p.pg_chunk.offset,
1862                                                length);
1863                        skb->len += length;
1864                        skb->data_len += length;
1865                        skb->truesize += length;
1866                } else {
1867                        /*
1868                         * The headers and data are in a single large buffer. We
1869                         * copy it to a new skb and let it go. This can happen with
1870                         * jumbo mtu on a non-TCP/UDP frame.
1871                         */
1872                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1873                        skb = netdev_alloc_skb(qdev->ndev, length);
1874                        if (skb == NULL) {
1875                                netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1876                                             "No skb available, drop the packet.\n");
1877                                return NULL;
1878                        }
1879                        pci_unmap_page(qdev->pdev,
1880                                       dma_unmap_addr(lbq_desc,
1881                                                      mapaddr),
1882                                       dma_unmap_len(lbq_desc, maplen),
1883                                       PCI_DMA_FROMDEVICE);
1884                        skb_reserve(skb, NET_IP_ALIGN);
1885                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1886                                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1887                                     length);
1888                        skb_fill_page_desc(skb, 0,
1889                                                lbq_desc->p.pg_chunk.page,
1890                                                lbq_desc->p.pg_chunk.offset,
1891                                                length);
1892                        skb->len += length;
1893                        skb->data_len += length;
1894                        skb->truesize += length;
1895                        ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1896                                              lbq_desc->p.pg_chunk.va,
1897                                              &hlen);
1898                        __pskb_pull_tail(skb, hlen);
1899                }
1900        } else {
1901                /*
1902                 * The data is in a chain of large buffers
1903                 * pointed to by a small buffer.  We loop
1904                 * through and chain them to our small header
1905                 * buffer's skb.
1906                 * frags:  An skb holds at most 18 frags and our small
1907                 *         buffer holds 32 sg entries.  In practice we
1908                 *         use at most 3 of them for our 9000 byte jumbo
1909                 *         frames, but if the MTU goes up we could
1910                 *         eventually be in trouble.
1911                 */
1912                int size, i = 0;
1913                sbq_desc = ql_get_curr_sbuf(rx_ring);
1914                pci_unmap_single(qdev->pdev,
1915                                 dma_unmap_addr(sbq_desc, mapaddr),
1916                                 dma_unmap_len(sbq_desc, maplen),
1917                                 PCI_DMA_FROMDEVICE);
1918                if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1919                        /*
1920                         * This is a non-TCP/UDP IP frame, so
1921                         * the headers aren't split into a small
1922                         * buffer.  We have to use the small buffer
1923                         * that contains our sg list as our skb to
1924                         * send upstairs. Copy the sg list here to
1925                         * a local buffer and use it to find the
1926                         * pages to chain.
1927                         */
1928                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1929                                     "%d bytes of headers & data in chain of large.\n",
1930                                     length);
1931                        skb = sbq_desc->p.skb;
1932                        sbq_desc->p.skb = NULL;
1933                        skb_reserve(skb, NET_IP_ALIGN);
1934                }
1935                do {
1936                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1937                        size = (length < rx_ring->lbq_buf_size) ? length :
1938                                rx_ring->lbq_buf_size;
1939
1940                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1941                                     "Adding page %d to skb for %d bytes.\n",
1942                                     i, size);
1943                        skb_fill_page_desc(skb, i,
1944                                                lbq_desc->p.pg_chunk.page,
1945                                                lbq_desc->p.pg_chunk.offset,
1946                                                size);
1947                        skb->len += size;
1948                        skb->data_len += size;
1949                        skb->truesize += size;
1950                        length -= size;
1951                        i++;
1952                } while (length > 0);
1953                ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1954                                      &hlen);
1955                __pskb_pull_tail(skb, hlen);
1956        }
1957        return skb;
1958}
1959
1960/* Process an inbound completion from an rx ring. */
1961static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1962                                   struct rx_ring *rx_ring,
1963                                   struct ib_mac_iocb_rsp *ib_mac_rsp,
1964                                   u16 vlan_id)
1965{
1966        struct net_device *ndev = qdev->ndev;
1967        struct sk_buff *skb = NULL;
1968
1969        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1970
1971        skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1972        if (unlikely(!skb)) {
1973                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1974                             "No skb available, drop packet.\n");
1975                rx_ring->rx_dropped++;
1976                return;
1977        }
1978
1979        /* Frame error, so drop the packet. */
1980        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1981                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1982                dev_kfree_skb_any(skb);
1983                return;
1984        }
1985
1986        /* The max framesize filter on this chip is set higher than
1987         * MTU since FCoE uses 2k frames.
1988         */
1989        if (skb->len > ndev->mtu + ETH_HLEN) {
1990                dev_kfree_skb_any(skb);
1991                rx_ring->rx_dropped++;
1992                return;
1993        }
1994
1995        /* loopback self test for ethtool */
1996        if (test_bit(QL_SELFTEST, &qdev->flags)) {
1997                ql_check_lb_frame(qdev, skb);
1998                dev_kfree_skb_any(skb);
1999                return;
2000        }
2001
2002        prefetch(skb->data);
2003        if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
2004                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
2005                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2006                             IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
2007                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2008                             IB_MAC_IOCB_RSP_M_REG ? "Registered" :
2009                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2010                             IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2011                rx_ring->rx_multicast++;
2012        }
2013        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2014                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2015                             "Promiscuous Packet.\n");
2016        }
2017
2018        skb->protocol = eth_type_trans(skb, ndev);
2019        skb_checksum_none_assert(skb);
2020
2021        /* If rx checksum is on, and there are no
2022         * csum or frame errors.
2023         */
2024        if ((ndev->features & NETIF_F_RXCSUM) &&
2025                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2026                /* TCP frame. */
2027                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2028                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2029                                     "TCP checksum done!\n");
2030                        skb->ip_summed = CHECKSUM_UNNECESSARY;
2031                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2032                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2033                        /* Unfragmented ipv4 UDP frame. */
2034                        struct iphdr *iph = (struct iphdr *) skb->data;
2035                        if (!(iph->frag_off &
2036                                htons(IP_MF|IP_OFFSET))) {
2037                                skb->ip_summed = CHECKSUM_UNNECESSARY;
2038                                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2039                                             "UDP checksum done!\n");
2040                        }
2041                }
2042        }
2043
2044        rx_ring->rx_packets++;
2045        rx_ring->rx_bytes += skb->len;
2046        skb_record_rx_queue(skb, rx_ring->cq_id);
2047        if (vlan_id != 0xffff)
2048                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2049        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2050                napi_gro_receive(&rx_ring->napi, skb);
2051        else
2052                netif_receive_skb(skb);
2053}
2054
2055/* Process an inbound completion from an rx ring. */
2056static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2057                                        struct rx_ring *rx_ring,
2058                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
2059{
2060        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2061        u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2062                        (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2063                        ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2064                        IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
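            /* 0xffff acts as a "no VLAN tag" sentinel; the receive paths
             * below only call __vlan_hwaccel_put_tag() for other values.
             */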
2065
2066        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2067
2068        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2069                /* The data and headers are split into
2070                 * separate buffers.
2071                 */
2072                ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2073                                                vlan_id);
2074        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2075                /* The data fit in a single small buffer.
2076                 * Allocate a new skb, copy the data and
2077                 * return the buffer to the free pool.
2078                 */
2079                ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2080                                                length, vlan_id);
2081        } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2082                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2083                (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2084                /* TCP packet in a page chunk that's been checksummed.
2085                 * Tack it on to our GRO skb and let it go.
2086                 */
2087                ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2088                                                length, vlan_id);
2089        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2090                /* Non-TCP packet in a page chunk. Allocate an
2091                 * skb, tack it on frags, and send it up.
2092                 */
2093                ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2094                                                length, vlan_id);
2095        } else {
2096                /* Non-TCP/UDP large frames that span multiple buffers
2097                 * can be processed correctly by the split frame logic.
2098                 */
2099                ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2100                                                vlan_id);
2101        }
2102
2103        return (unsigned long)length;
2104}
2105
2106/* Process an outbound completion from an rx ring. */
2107static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2108                                   struct ob_mac_iocb_rsp *mac_rsp)
2109{
2110        struct tx_ring *tx_ring;
2111        struct tx_ring_desc *tx_ring_desc;
2112
2113        QL_DUMP_OB_MAC_RSP(mac_rsp);
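            /* txq_idx and tid were stored in the IOCB at send time (see
             * qlge_send()), so they take us straight back to the right
             * tx ring and descriptor.
             */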
2114        tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2115        tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2116        ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2117        tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2118        tx_ring->tx_packets++;
2119        dev_kfree_skb(tx_ring_desc->skb);
2120        tx_ring_desc->skb = NULL;
2121
2122        if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2123                                        OB_MAC_IOCB_RSP_S |
2124                                        OB_MAC_IOCB_RSP_L |
2125                                        OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2126                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2127                        netif_warn(qdev, tx_done, qdev->ndev,
2128                                   "Total descriptor length did not match transfer length.\n");
2129                }
2130                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2131                        netif_warn(qdev, tx_done, qdev->ndev,
2132                                   "Frame too short to be valid, not sent.\n");
2133                }
2134                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2135                        netif_warn(qdev, tx_done, qdev->ndev,
2136                                   "Frame too long, but sent anyway.\n");
2137                }
2138                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2139                        netif_warn(qdev, tx_done, qdev->ndev,
2140                                   "PCI backplane error. Frame not sent.\n");
2141                }
2142        }
2143        atomic_inc(&tx_ring->tx_count);
2144}
2145
2146/* Fire up a handler to reset the MPI processor. */
2147void ql_queue_fw_error(struct ql_adapter *qdev)
2148{
2149        ql_link_off(qdev);
2150        queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2151}
2152
2153void ql_queue_asic_error(struct ql_adapter *qdev)
2154{
2155        ql_link_off(qdev);
2156        ql_disable_interrupts(qdev);
2157        /* Clear adapter up bit to signal the recovery
2158         * process that it shouldn't kill the reset worker
2159         * thread
2160         */
2161        clear_bit(QL_ADAPTER_UP, &qdev->flags);
2162        /* Set the asic recovery bit to tell the reset process that we
2163         * are in fatal error recovery rather than a normal close
2164         */
2165        set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2166        queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2167}
2168
2169static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2170                                    struct ib_ae_iocb_rsp *ib_ae_rsp)
2171{
2172        switch (ib_ae_rsp->event) {
2173        case MGMT_ERR_EVENT:
2174                netif_err(qdev, rx_err, qdev->ndev,
2175                          "Management Processor Fatal Error.\n");
2176                ql_queue_fw_error(qdev);
2177                return;
2178
2179        case CAM_LOOKUP_ERR_EVENT:
2180                netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2181                netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2182                ql_queue_asic_error(qdev);
2183                return;
2184
2185        case SOFT_ECC_ERROR_EVENT:
2186                netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2187                ql_queue_asic_error(qdev);
2188                break;
2189
2190        case PCI_ERR_ANON_BUF_RD:
2191                netdev_err(qdev->ndev, "PCI error occurred when reading "
2192                                        "anonymous buffers from rx_ring %d.\n",
2193                                        ib_ae_rsp->q_id);
2194                ql_queue_asic_error(qdev);
2195                break;
2196
2197        default:
2198                netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2199                          ib_ae_rsp->event);
2200                ql_queue_asic_error(qdev);
2201                break;
2202        }
2203}
2204
2205static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2206{
2207        struct ql_adapter *qdev = rx_ring->qdev;
2208        u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2209        struct ob_mac_iocb_rsp *net_rsp = NULL;
2210        int count = 0;
2211
2212        struct tx_ring *tx_ring;
2213        /* While there are entries in the completion queue. */
2214        while (prod != rx_ring->cnsmr_idx) {
2215
2216                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2217                             "cq_id = %d, prod = %d, cnsmr = %d.\n",
2218                             rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2219
2220                net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2221                rmb();
2222                switch (net_rsp->opcode) {
2223
2224                case OPCODE_OB_MAC_TSO_IOCB:
2225                case OPCODE_OB_MAC_IOCB:
2226                        ql_process_mac_tx_intr(qdev, net_rsp);
2227                        break;
2228                default:
2229                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2230                                     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2231                                     net_rsp->opcode);
2232                }
2233                count++;
2234                ql_update_cq(rx_ring);
2235                prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2236        }
2237        if (!net_rsp)
2238                return 0;
2239        ql_write_cq_idx(rx_ring);
2240        tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2241        if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2242                if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2243                        /*
2244                         * The queue got stopped because the tx_ring was full.
2245                         * Wake it up now that at least 25% of its descriptors are free.
2246                         */
2247                        netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2248        }
2249
2250        return count;
2251}
2252
2253static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2254{
2255        struct ql_adapter *qdev = rx_ring->qdev;
2256        u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2257        struct ql_net_rsp_iocb *net_rsp;
2258        int count = 0;
2259
2260        /* While there are entries in the completion queue. */
2261        while (prod != rx_ring->cnsmr_idx) {
2262
2263                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2264                             "cq_id = %d, prod = %d, cnsmr = %d.\n",
2265                             rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2266
2267                net_rsp = rx_ring->curr_entry;
2268                rmb();
2269                switch (net_rsp->opcode) {
2270                case OPCODE_IB_MAC_IOCB:
2271                        ql_process_mac_rx_intr(qdev, rx_ring,
2272                                               (struct ib_mac_iocb_rsp *)
2273                                               net_rsp);
2274                        break;
2275
2276                case OPCODE_IB_AE_IOCB:
2277                        ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2278                                                net_rsp);
2279                        break;
2280                default:
2281                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2282                                     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2283                                     net_rsp->opcode);
2284                        break;
2285                }
2286                count++;
2287                ql_update_cq(rx_ring);
2288                prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
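                    /* Respect the NAPI budget; anything still outstanding
                     * is picked up on the next poll.
                     */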
2289                if (count == budget)
2290                        break;
2291        }
2292        ql_update_buffer_queues(qdev, rx_ring);
2293        ql_write_cq_idx(rx_ring);
2294        return count;
2295}
2296
2297static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2298{
2299        struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2300        struct ql_adapter *qdev = rx_ring->qdev;
2301        struct rx_ring *trx_ring;
2302        int i, work_done = 0;
2303        struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2304
2305        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2306                     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2307
2308        /* Service the TX rings first.  They start
2309         * right after the RSS rings. */
2310        for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2311                trx_ring = &qdev->rx_ring[i];
2312                /* If this TX completion ring belongs to this vector and
2313                 * it's not empty then service it.
2314                 */
2315                if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2316                        (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2317                                        trx_ring->cnsmr_idx)) {
2318                        netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2319                                     "%s: Servicing TX completion ring %d.\n",
2320                                     __func__, trx_ring->cq_id);
2321                        ql_clean_outbound_rx_ring(trx_ring);
2322                }
2323        }
2324
2325        /*
2326         * Now service the RSS ring if it's active.
2327         */
2328        if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2329                                        rx_ring->cnsmr_idx) {
2330                netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2331                             "%s: Servicing RX completion ring %d.\n",
2332                             __func__, rx_ring->cq_id);
2333                work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2334        }
2335
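            /* Standard NAPI completion: only when the full budget was not
             * consumed do we stop polling and re-arm the completion
             * interrupt for this vector.
             */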
2336        if (work_done < budget) {
2337                napi_complete(napi);
2338                ql_enable_completion_interrupt(qdev, rx_ring->irq);
2339        }
2340        return work_done;
2341}
2342
2343static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2344{
2345        struct ql_adapter *qdev = netdev_priv(ndev);
2346
2347        if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2348                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2349                                 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2350        } else {
2351                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2352        }
2353}
2354
2355/**
2356 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2357 * based on the features to enable/disable hardware vlan accel
2358 */
2359static int qlge_update_hw_vlan_features(struct net_device *ndev,
2360                                        netdev_features_t features)
2361{
2362        struct ql_adapter *qdev = netdev_priv(ndev);
2363        bool need_restart = netif_running(ndev);
2364        int status = 0;
2365
2366        if (need_restart) {
2367                status = ql_adapter_down(qdev);
2368                if (status) {
2369                        netif_err(qdev, link, qdev->ndev,
2370                                  "Failed to bring down the adapter\n");
2371                        return status;
2372                }
2373        }
2374
2375        /* update the features with the recent change */
2376        ndev->features = features;
2377
2378        if (need_restart) {
2379                status = ql_adapter_up(qdev);
2380                if (status) {
2381                        netif_err(qdev, link, qdev->ndev,
2382                                  "Failed to bring up the adapter\n");
2383                        return status;
2384                }
2385        }
2386        return status;
2387}
2388
2389static netdev_features_t qlge_fix_features(struct net_device *ndev,
2390        netdev_features_t features)
2391{
2392        int err;
2393
2394        /* Update the behavior of vlan accel in the adapter */
2395        err = qlge_update_hw_vlan_features(ndev, features);
2396        if (err)
2397                return err;
2398
2399        return features;
2400}
2401
2402static int qlge_set_features(struct net_device *ndev,
2403        netdev_features_t features)
2404{
2405        netdev_features_t changed = ndev->features ^ features;
2406
2407        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2408                qlge_vlan_mode(ndev, features);
2409
2410        return 0;
2411}
2412
2413static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2414{
2415        u32 enable_bit = MAC_ADDR_E;
2416        int err;
2417
2418        err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2419                                  MAC_ADDR_TYPE_VLAN, vid);
2420        if (err)
2421                netif_err(qdev, ifup, qdev->ndev,
2422                          "Failed to init vlan address.\n");
2423        return err;
2424}
2425
2426static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2427{
2428        struct ql_adapter *qdev = netdev_priv(ndev);
2429        int status;
2430        int err;
2431
2432        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2433        if (status)
2434                return status;
2435
2436        err = __qlge_vlan_rx_add_vid(qdev, vid);
2437        set_bit(vid, qdev->active_vlans);
2438
2439        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2440
2441        return err;
2442}
2443
2444static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2445{
2446        u32 enable_bit = 0;
2447        int err;
2448
2449        err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2450                                  MAC_ADDR_TYPE_VLAN, vid);
2451        if (err)
2452                netif_err(qdev, ifup, qdev->ndev,
2453                          "Failed to clear vlan address.\n");
2454        return err;
2455}
2456
2457static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2458{
2459        struct ql_adapter *qdev = netdev_priv(ndev);
2460        int status;
2461        int err;
2462
2463        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2464        if (status)
2465                return status;
2466
2467        err = __qlge_vlan_rx_kill_vid(qdev, vid);
2468        clear_bit(vid, qdev->active_vlans);
2469
2470        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2471
2472        return err;
2473}
2474
2475static void qlge_restore_vlan(struct ql_adapter *qdev)
2476{
2477        int status;
2478        u16 vid;
2479
2480        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2481        if (status)
2482                return;
2483
2484        for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2485                __qlge_vlan_rx_add_vid(qdev, vid);
2486
2487        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2488}
2489
2490/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2491static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2492{
2493        struct rx_ring *rx_ring = dev_id;
2494        napi_schedule(&rx_ring->napi);
2495        return IRQ_HANDLED;
2496}
2497
2498/* This handles a fatal error, MPI activity, and the default
2499 * rx_ring in an MSI-X multiple vector environment.
2500 * In an MSI/Legacy environment it also processes the rest of
2501 * the rx_rings.
2502 */
2503static irqreturn_t qlge_isr(int irq, void *dev_id)
2504{
2505        struct rx_ring *rx_ring = dev_id;
2506        struct ql_adapter *qdev = rx_ring->qdev;
2507        struct intr_context *intr_context = &qdev->intr_context[0];
2508        u32 var;
2509        int work_done = 0;
2510
2511        spin_lock(&qdev->hw_lock);
2512        if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2513                netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2514                             "Shared Interrupt, Not ours!\n");
2515                spin_unlock(&qdev->hw_lock);
2516                return IRQ_NONE;
2517        }
2518        spin_unlock(&qdev->hw_lock);
2519
2520        var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2521
2522        /*
2523         * Check for fatal error.
2524         */
2525        if (var & STS_FE) {
2526                ql_queue_asic_error(qdev);
2527                netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2528                var = ql_read32(qdev, ERR_STS);
2529                netdev_err(qdev->ndev, "Resetting chip. "
2530                                        "Error Status Register = 0x%x\n", var);
2531                return IRQ_HANDLED;
2532        }
2533
2534        /*
2535         * Check MPI processor activity.
2536         */
2537        if ((var & STS_PI) &&
2538                (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2539                /*
2540                 * We've got an async event or mailbox completion.
2541                 * Handle it and clear the source of the interrupt.
2542                 */
2543                netif_err(qdev, intr, qdev->ndev,
2544                          "Got MPI processor interrupt.\n");
2545                ql_disable_completion_interrupt(qdev, intr_context->intr);
2546                ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2547                queue_delayed_work_on(smp_processor_id(),
2548                                qdev->workqueue, &qdev->mpi_work, 0);
2549                work_done++;
2550        }
2551
2552        /*
2553         * Get the bit-mask that shows the active queues for this
2554         * pass.  Compare it to the queues that this irq services
2555         * and call napi if there's a match.
2556         */
2557        var = ql_read32(qdev, ISR1);
2558        if (var & intr_context->irq_mask) {
2559                netif_info(qdev, intr, qdev->ndev,
2560                           "Waking handler for rx_ring[0].\n");
2561                ql_disable_completion_interrupt(qdev, intr_context->intr);
2562                napi_schedule(&rx_ring->napi);
2563                work_done++;
2564        }
2565        ql_enable_completion_interrupt(qdev, intr_context->intr);
2566        return work_done ? IRQ_HANDLED : IRQ_NONE;
2567}
2568
2569static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2570{
2571
2572        if (skb_is_gso(skb)) {
2573                int err;
2574                __be16 l3_proto = vlan_get_protocol(skb);
2575
2576                err = skb_cow_head(skb, 0);
2577                if (err < 0)
2578                        return err;
2579
2580                mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2581                mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2582                mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2583                mac_iocb_ptr->total_hdrs_len =
2584                    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2585                mac_iocb_ptr->net_trans_offset =
2586                    cpu_to_le16(skb_network_offset(skb) |
2587                                skb_transport_offset(skb)
2588                                << OB_MAC_TRANSPORT_HDR_SHIFT);
2589                mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2590                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
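                    /* The checksum fields below are seeded with just the
                     * pseudo-header sum (length 0); the hardware completes
                     * the checksum for each segment it generates.
                     */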
2591                if (likely(l3_proto == htons(ETH_P_IP))) {
2592                        struct iphdr *iph = ip_hdr(skb);
2593                        iph->check = 0;
2594                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2595                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2596                                                                 iph->daddr, 0,
2597                                                                 IPPROTO_TCP,
2598                                                                 0);
2599                } else if (l3_proto == htons(ETH_P_IPV6)) {
2600                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2601                        tcp_hdr(skb)->check =
2602                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2603                                             &ipv6_hdr(skb)->daddr,
2604                                             0, IPPROTO_TCP, 0);
2605                }
2606                return 1;
2607        }
2608        return 0;
2609}
2610
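    /* Program the IOCB for hardware TCP/UDP checksum offload on an IPv4
     * frame: record the header offsets and seed the transport checksum
     * field with the pseudo-header checksum so the chip can complete it.
     */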
2611static void ql_hw_csum_setup(struct sk_buff *skb,
2612                             struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2613{
2614        int len;
2615        struct iphdr *iph = ip_hdr(skb);
2616        __sum16 *check;
2617        mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2618        mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2619        mac_iocb_ptr->net_trans_offset =
2620                cpu_to_le16(skb_network_offset(skb) |
2621                skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2622
2623        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2624        len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2625        if (likely(iph->protocol == IPPROTO_TCP)) {
2626                check = &(tcp_hdr(skb)->check);
2627                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2628                mac_iocb_ptr->total_hdrs_len =
2629                    cpu_to_le16(skb_transport_offset(skb) +
2630                                (tcp_hdr(skb)->doff << 2));
2631        } else {
2632                check = &(udp_hdr(skb)->check);
2633                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2634                mac_iocb_ptr->total_hdrs_len =
2635                    cpu_to_le16(skb_transport_offset(skb) +
2636                                sizeof(struct udphdr));
2637        }
2638        *check = ~csum_tcpudp_magic(iph->saddr,
2639                                    iph->daddr, len, iph->protocol, 0);
2640}
2641
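    /* Main transmit entry point.  Build a MAC IOCB for the skb, apply VLAN,
     * TSO or checksum offload as needed, map the buffers and ring the
     * producer-index doorbell of the selected TX ring.
     */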
2642static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2643{
2644        struct tx_ring_desc *tx_ring_desc;
2645        struct ob_mac_iocb_req *mac_iocb_ptr;
2646        struct ql_adapter *qdev = netdev_priv(ndev);
2647        int tso;
2648        struct tx_ring *tx_ring;
2649        u32 tx_ring_idx = (u32) skb->queue_mapping;
2650
2651        tx_ring = &qdev->tx_ring[tx_ring_idx];
2652
2653        if (skb_padto(skb, ETH_ZLEN))
2654                return NETDEV_TX_OK;
2655
2656        if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2657                netif_info(qdev, tx_queued, qdev->ndev,
2658                           "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2659                           __func__, tx_ring_idx);
2660                netif_stop_subqueue(ndev, tx_ring->wq_id);
2661                tx_ring->tx_errors++;
2662                return NETDEV_TX_BUSY;
2663        }
2664        tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2665        mac_iocb_ptr = tx_ring_desc->queue_entry;
2666        memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2667
2668        mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2669        mac_iocb_ptr->tid = tx_ring_desc->index;
2670        /* We use the upper 32-bits to store the tx queue for this IO.
2671         * When we get the completion we can use it to establish the context.
2672         */
2673        mac_iocb_ptr->txq_idx = tx_ring_idx;
2674        tx_ring_desc->skb = skb;
2675
2676        mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2677
2678        if (skb_vlan_tag_present(skb)) {
2679                netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2680                             "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2681                mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2682                mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2683        }
2684        tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2685        if (tso < 0) {
2686                dev_kfree_skb_any(skb);
2687                return NETDEV_TX_OK;
2688        } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2689                ql_hw_csum_setup(skb,
2690                                 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2691        }
2692        if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2693                        NETDEV_TX_OK) {
2694                netif_err(qdev, tx_queued, qdev->ndev,
2695                          "Could not map the segments.\n");
2696                tx_ring->tx_errors++;
2697                return NETDEV_TX_BUSY;
2698        }
2699        QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2700        tx_ring->prod_idx++;
2701        if (tx_ring->prod_idx == tx_ring->wq_len)
2702                tx_ring->prod_idx = 0;
2703        wmb();
2704
2705        ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2706        netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2707                     "tx queued, slot %d, len %d\n",
2708                     tx_ring->prod_idx, skb->len);
2709
2710        atomic_dec(&tx_ring->tx_count);
2711
2712        if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2713                netif_stop_subqueue(ndev, tx_ring->wq_id);
2714                if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2715                        /*
2716                         * The queue got stopped because the tx_ring was full.
2717                         * Wake it up, because more than a quarter of its slots are now free.
2718                         */
2719                        netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2720        }
2721        return NETDEV_TX_OK;
2722}
2723
2724
2725static void ql_free_shadow_space(struct ql_adapter *qdev)
2726{
2727        if (qdev->rx_ring_shadow_reg_area) {
2728                pci_free_consistent(qdev->pdev,
2729                                    PAGE_SIZE,
2730                                    qdev->rx_ring_shadow_reg_area,
2731                                    qdev->rx_ring_shadow_reg_dma);
2732                qdev->rx_ring_shadow_reg_area = NULL;
2733        }
2734        if (qdev->tx_ring_shadow_reg_area) {
2735                pci_free_consistent(qdev->pdev,
2736                                    PAGE_SIZE,
2737                                    qdev->tx_ring_shadow_reg_area,
2738                                    qdev->tx_ring_shadow_reg_dma);
2739                qdev->tx_ring_shadow_reg_area = NULL;
2740        }
2741}
2742
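    /* Allocate one page each of coherent DMA memory for the RX and TX
     * shadow register areas (completion producer/consumer index shadows
     * and the buffer queue indirection lists).
     */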
2743static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2744{
2745        qdev->rx_ring_shadow_reg_area =
2746                pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2747                                      &qdev->rx_ring_shadow_reg_dma);
2748        if (qdev->rx_ring_shadow_reg_area == NULL) {
2749                netif_err(qdev, ifup, qdev->ndev,
2750                          "Allocation of RX shadow space failed.\n");
2751                return -ENOMEM;
2752        }
2753
2754        qdev->tx_ring_shadow_reg_area =
2755                pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2756                                      &qdev->tx_ring_shadow_reg_dma);
2757        if (qdev->tx_ring_shadow_reg_area == NULL) {
2758                netif_err(qdev, ifup, qdev->ndev,
2759                          "Allocation of TX shadow space failed.\n");
2760                goto err_wqp_sh_area;
2761        }
2762        return 0;
2763
2764err_wqp_sh_area:
2765        pci_free_consistent(qdev->pdev,
2766                            PAGE_SIZE,
2767                            qdev->rx_ring_shadow_reg_area,
2768                            qdev->rx_ring_shadow_reg_dma);
2769        return -ENOMEM;
2770}
2771
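    /* Link each software tx_ring_desc to its IOCB slot in the DMA work
     * queue and mark every slot as free.
     */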
2772static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2773{
2774        struct tx_ring_desc *tx_ring_desc;
2775        int i;
2776        struct ob_mac_iocb_req *mac_iocb_ptr;
2777
2778        mac_iocb_ptr = tx_ring->wq_base;
2779        tx_ring_desc = tx_ring->q;
2780        for (i = 0; i < tx_ring->wq_len; i++) {
2781                tx_ring_desc->index = i;
2782                tx_ring_desc->skb = NULL;
2783                tx_ring_desc->queue_entry = mac_iocb_ptr;
2784                mac_iocb_ptr++;
2785                tx_ring_desc++;
2786        }
2787        atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2788}
2789
2790static void ql_free_tx_resources(struct ql_adapter *qdev,
2791                                 struct tx_ring *tx_ring)
2792{
2793        if (tx_ring->wq_base) {
2794                pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2795                                    tx_ring->wq_base, tx_ring->wq_base_dma);
2796                tx_ring->wq_base = NULL;
2797        }
2798        kfree(tx_ring->q);
2799        tx_ring->q = NULL;
2800}
2801
2802static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2803                                 struct tx_ring *tx_ring)
2804{
2805        tx_ring->wq_base =
2806            pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2807                                 &tx_ring->wq_base_dma);
2808
2809        if ((tx_ring->wq_base == NULL) ||
2810            tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2811                goto pci_alloc_err;
2812
2813        tx_ring->q =
2814            kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2815        if (tx_ring->q == NULL)
2816                goto err;
2817
2818        return 0;
2819err:
2820        pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2821                            tx_ring->wq_base, tx_ring->wq_base_dma);
2822        tx_ring->wq_base = NULL;
2823pci_alloc_err:
2824        netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2825        return -ENOMEM;
2826}
2827
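    /* Return any page chunks still held by the large buffer queue,
     * unmapping the underlying block when its last chunk is released.
     */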
2828static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2829{
2830        struct bq_desc *lbq_desc;
2831
2832        uint32_t  curr_idx, clean_idx;
2833
2834        curr_idx = rx_ring->lbq_curr_idx;
2835        clean_idx = rx_ring->lbq_clean_idx;
2836        while (curr_idx != clean_idx) {
2837                lbq_desc = &rx_ring->lbq[curr_idx];
2838
2839                if (lbq_desc->p.pg_chunk.last_flag) {
2840                        pci_unmap_page(qdev->pdev,
2841                                lbq_desc->p.pg_chunk.map,
2842                                ql_lbq_block_size(qdev),
2843                                       PCI_DMA_FROMDEVICE);
2844                        lbq_desc->p.pg_chunk.last_flag = 0;
2845                }
2846
2847                put_page(lbq_desc->p.pg_chunk.page);
2848                lbq_desc->p.pg_chunk.page = NULL;
2849
2850                if (++curr_idx == rx_ring->lbq_len)
2851                        curr_idx = 0;
2852
2853        }
2854        if (rx_ring->pg_chunk.page) {
2855                pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2856                        ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2857                put_page(rx_ring->pg_chunk.page);
2858                rx_ring->pg_chunk.page = NULL;
2859        }
2860}
2861
2862static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2863{
2864        int i;
2865        struct bq_desc *sbq_desc;
2866
2867        for (i = 0; i < rx_ring->sbq_len; i++) {
2868                sbq_desc = &rx_ring->sbq[i];
2869                if (sbq_desc == NULL) {
2870                        netif_err(qdev, ifup, qdev->ndev,
2871                                  "sbq_desc %d is NULL.\n", i);
2872                        return;
2873                }
2874                if (sbq_desc->p.skb) {
2875                        pci_unmap_single(qdev->pdev,
2876                                         dma_unmap_addr(sbq_desc, mapaddr),
2877                                         dma_unmap_len(sbq_desc, maplen),
2878                                         PCI_DMA_FROMDEVICE);
2879                        dev_kfree_skb(sbq_desc->p.skb);
2880                        sbq_desc->p.skb = NULL;
2881                }
2882        }
2883}
2884
2885/* Free all large and small rx buffers associated
2886 * with the completion queues for this device.
2887 */
2888static void ql_free_rx_buffers(struct ql_adapter *qdev)
2889{
2890        int i;
2891        struct rx_ring *rx_ring;
2892
2893        for (i = 0; i < qdev->rx_ring_count; i++) {
2894                rx_ring = &qdev->rx_ring[i];
2895                if (rx_ring->lbq)
2896                        ql_free_lbq_buffers(qdev, rx_ring);
2897                if (rx_ring->sbq)
2898                        ql_free_sbq_buffers(qdev, rx_ring);
2899        }
2900}
2901
2902static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2903{
2904        struct rx_ring *rx_ring;
2905        int i;
2906
2907        for (i = 0; i < qdev->rx_ring_count; i++) {
2908                rx_ring = &qdev->rx_ring[i];
2909                if (rx_ring->type != TX_Q)
2910                        ql_update_buffer_queues(qdev, rx_ring);
2911        }
2912}
2913
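    /* Point each large buffer descriptor at its slot in the DMA'ed address
     * array that the chip consumes.
     */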
2914static void ql_init_lbq_ring(struct ql_adapter *qdev,
2915                                struct rx_ring *rx_ring)
2916{
2917        int i;
2918        struct bq_desc *lbq_desc;
2919        __le64 *bq = rx_ring->lbq_base;
2920
2921        memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2922        for (i = 0; i < rx_ring->lbq_len; i++) {
2923                lbq_desc = &rx_ring->lbq[i];
2924                memset(lbq_desc, 0, sizeof(*lbq_desc));
2925                lbq_desc->index = i;
2926                lbq_desc->addr = bq;
2927                bq++;
2928        }
2929}
2930
2931static void ql_init_sbq_ring(struct ql_adapter *qdev,
2932                                struct rx_ring *rx_ring)
2933{
2934        int i;
2935        struct bq_desc *sbq_desc;
2936        __le64 *bq = rx_ring->sbq_base;
2937
2938        memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2939        for (i = 0; i < rx_ring->sbq_len; i++) {
2940                sbq_desc = &rx_ring->sbq[i];
2941                memset(sbq_desc, 0, sizeof(*sbq_desc));
2942                sbq_desc->index = i;
2943                sbq_desc->addr = bq;
2944                bq++;
2945        }
2946}
2947
2948static void ql_free_rx_resources(struct ql_adapter *qdev,
2949                                 struct rx_ring *rx_ring)
2950{
2951        /* Free the small buffer queue. */
2952        if (rx_ring->sbq_base) {
2953                pci_free_consistent(qdev->pdev,
2954                                    rx_ring->sbq_size,
2955                                    rx_ring->sbq_base, rx_ring->sbq_base_dma);
2956                rx_ring->sbq_base = NULL;
2957        }
2958
2959        /* Free the small buffer queue control blocks. */
2960        kfree(rx_ring->sbq);
2961        rx_ring->sbq = NULL;
2962
2963        /* Free the large buffer queue. */
2964        if (rx_ring->lbq_base) {
2965                pci_free_consistent(qdev->pdev,
2966                                    rx_ring->lbq_size,
2967                                    rx_ring->lbq_base, rx_ring->lbq_base_dma);
2968                rx_ring->lbq_base = NULL;
2969        }
2970
2971        /* Free the large buffer queue control blocks. */
2972        kfree(rx_ring->lbq);
2973        rx_ring->lbq = NULL;
2974
2975        /* Free the rx queue. */
2976        if (rx_ring->cq_base) {
2977                pci_free_consistent(qdev->pdev,
2978                                    rx_ring->cq_size,
2979                                    rx_ring->cq_base, rx_ring->cq_base_dma);
2980                rx_ring->cq_base = NULL;
2981        }
2982}
2983
2984/* Allocate queues and buffers for this completion queue based
2985 * on the values in the parameter structure. */
2986static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2987                                 struct rx_ring *rx_ring)
2988{
2989
2990        /*
2991         * Allocate the completion queue for this rx_ring.
2992         */
2993        rx_ring->cq_base =
2994            pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2995                                 &rx_ring->cq_base_dma);
2996
2997        if (rx_ring->cq_base == NULL) {
2998                netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2999                return -ENOMEM;
3000        }
3001
3002        if (rx_ring->sbq_len) {
3003                /*
3004                 * Allocate small buffer queue.
3005                 */
3006                rx_ring->sbq_base =
3007                    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
3008                                         &rx_ring->sbq_base_dma);
3009
3010                if (rx_ring->sbq_base == NULL) {
3011                        netif_err(qdev, ifup, qdev->ndev,
3012                                  "Small buffer queue allocation failed.\n");
3013                        goto err_mem;
3014                }
3015
3016                /*
3017                 * Allocate small buffer queue control blocks.
3018                 */
3019                rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3020                                             sizeof(struct bq_desc),
3021                                             GFP_KERNEL);
3022                if (rx_ring->sbq == NULL)
3023                        goto err_mem;
3024
3025                ql_init_sbq_ring(qdev, rx_ring);
3026        }
3027
3028        if (rx_ring->lbq_len) {
3029                /*
3030                 * Allocate large buffer queue.
3031                 */
3032                rx_ring->lbq_base =
3033                    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3034                                         &rx_ring->lbq_base_dma);
3035
3036                if (rx_ring->lbq_base == NULL) {
3037                        netif_err(qdev, ifup, qdev->ndev,
3038                                  "Large buffer queue allocation failed.\n");
3039                        goto err_mem;
3040                }
3041                /*
3042                 * Allocate large buffer queue control blocks.
3043                 */
3044                rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3045                                             sizeof(struct bq_desc),
3046                                             GFP_KERNEL);
3047                if (rx_ring->lbq == NULL)
3048                        goto err_mem;
3049
3050                ql_init_lbq_ring(qdev, rx_ring);
3051        }
3052
3053        return 0;
3054
3055err_mem:
3056        ql_free_rx_resources(qdev, rx_ring);
3057        return -ENOMEM;
3058}
3059
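    /* Reclaim any skbs still outstanding on the TX rings at shutdown;
     * their completions will never arrive once the chip is reset.
     */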
3060static void ql_tx_ring_clean(struct ql_adapter *qdev)
3061{
3062        struct tx_ring *tx_ring;
3063        struct tx_ring_desc *tx_ring_desc;
3064        int i, j;
3065
3066        /*
3067         * Loop through all queues and free
3068         * any resources.
3069         */
3070        for (j = 0; j < qdev->tx_ring_count; j++) {
3071                tx_ring = &qdev->tx_ring[j];
3072                for (i = 0; i < tx_ring->wq_len; i++) {
3073                        tx_ring_desc = &tx_ring->q[i];
3074                        if (tx_ring_desc && tx_ring_desc->skb) {
3075                                netif_err(qdev, ifdown, qdev->ndev,
3076                                          "Freeing lost SKB %p, from queue %d, index %d.\n",
3077                                          tx_ring_desc->skb, j,
3078                                          tx_ring_desc->index);
3079                                ql_unmap_send(qdev, tx_ring_desc,
3080                                              tx_ring_desc->map_cnt);
3081                                dev_kfree_skb(tx_ring_desc->skb);
3082                                tx_ring_desc->skb = NULL;
3083                        }
3084                }
3085        }
3086}
3087
3088static void ql_free_mem_resources(struct ql_adapter *qdev)
3089{
3090        int i;
3091
3092        for (i = 0; i < qdev->tx_ring_count; i++)
3093                ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3094        for (i = 0; i < qdev->rx_ring_count; i++)
3095                ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3096        ql_free_shadow_space(qdev);
3097}
3098
3099static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3100{
3101        int i;
3102
3103        /* Allocate space for our shadow registers and such. */
3104        if (ql_alloc_shadow_space(qdev))
3105                return -ENOMEM;
3106
3107        for (i = 0; i < qdev->rx_ring_count; i++) {
3108                if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3109                        netif_err(qdev, ifup, qdev->ndev,
3110                                  "RX resource allocation failed.\n");
3111                        goto err_mem;
3112                }
3113        }
3114        /* Allocate tx queue resources */
3115        for (i = 0; i < qdev->tx_ring_count; i++) {
3116                if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3117                        netif_err(qdev, ifup, qdev->ndev,
3118                                  "TX resource allocation failed.\n");
3119                        goto err_mem;
3120                }
3121        }
3122        return 0;
3123
3124err_mem:
3125        ql_free_mem_resources(qdev);
3126        return -ENOMEM;
3127}
3128
3129/* Set up the rx ring control block and pass it to the chip.
3130 * The control block is defined as
3131 * "Completion Queue Initialization Control Block", or cqicb.
3132 */
3133static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3134{
3135        struct cqicb *cqicb = &rx_ring->cqicb;
3136        void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3137                (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3138        u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3139                (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3140        void __iomem *doorbell_area =
3141            qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3142        int err = 0;
3143        u16 bq_len;
3144        u64 tmp;
3145        __le64 *base_indirect_ptr;
3146        int page_entries;
3147
3148        /* Set up the shadow registers for this ring. */
3149        rx_ring->prod_idx_sh_reg = shadow_reg;
3150        rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3151        *rx_ring->prod_idx_sh_reg = 0;
3152        shadow_reg += sizeof(u64);
3153        shadow_reg_dma += sizeof(u64);
3154        rx_ring->lbq_base_indirect = shadow_reg;
3155        rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3156        shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3157        shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3158        rx_ring->sbq_base_indirect = shadow_reg;
3159        rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3160
3161        /* PCI doorbell mem area + 0x00 for consumer index register */
3162        rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3163        rx_ring->cnsmr_idx = 0;
3164        rx_ring->curr_entry = rx_ring->cq_base;
3165
3166        /* PCI doorbell mem area + 0x04 for valid register */
3167        rx_ring->valid_db_reg = doorbell_area + 0x04;
3168
3169        /* PCI doorbell mem area + 0x18 for large buffer consumer */
3170        rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3171
3172        /* PCI doorbell mem area + 0x1c */
3173        rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3174
3175        memset((void *)cqicb, 0, sizeof(struct cqicb));
3176        cqicb->msix_vect = rx_ring->irq;
3177
3178        bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3179        cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3180
3181        cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3182
3183        cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3184
3185        /*
3186         * Set up the control block load flags.
3187         */
3188        cqicb->flags = FLAGS_LC |       /* Load queue base address */
3189            FLAGS_LV |          /* Load MSI-X vector */
3190            FLAGS_LI;           /* Load irq delay values */
3191        if (rx_ring->lbq_len) {
3192                cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3193                tmp = (u64)rx_ring->lbq_base_dma;
3194                base_indirect_ptr = rx_ring->lbq_base_indirect;
3195                page_entries = 0;
3196                do {
3197                        *base_indirect_ptr = cpu_to_le64(tmp);
3198                        tmp += DB_PAGE_SIZE;
3199                        base_indirect_ptr++;
3200                        page_entries++;
3201                } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3202                cqicb->lbq_addr =
3203                    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3204                bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3205                        (u16) rx_ring->lbq_buf_size;
3206                cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3207                bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3208                        (u16) rx_ring->lbq_len;
3209                cqicb->lbq_len = cpu_to_le16(bq_len);
3210                rx_ring->lbq_prod_idx = 0;
3211                rx_ring->lbq_curr_idx = 0;
3212                rx_ring->lbq_clean_idx = 0;
3213                rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3214        }
3215        if (rx_ring->sbq_len) {
3216                cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3217                tmp = (u64)rx_ring->sbq_base_dma;
3218                base_indirect_ptr = rx_ring->sbq_base_indirect;
3219                page_entries = 0;
3220                do {
3221                        *base_indirect_ptr = cpu_to_le64(tmp);
3222                        tmp += DB_PAGE_SIZE;
3223                        base_indirect_ptr++;
3224                        page_entries++;
3225                } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3226                cqicb->sbq_addr =
3227                    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3228                cqicb->sbq_buf_size =
3229                    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3230                bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3231                        (u16) rx_ring->sbq_len;
3232                cqicb->sbq_len = cpu_to_le16(bq_len);
3233                rx_ring->sbq_prod_idx = 0;
3234                rx_ring->sbq_curr_idx = 0;
3235                rx_ring->sbq_clean_idx = 0;
3236                rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3237        }
3238        switch (rx_ring->type) {
3239        case TX_Q:
3240                cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3241                cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3242                break;
3243        case RX_Q:
3244                /* Inbound completion handling rx_rings run in
3245                 * separate NAPI contexts.
3246                 */
3247                netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3248                               64);
3249                cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3250                cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3251                break;
3252        default:
3253                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3254                             "Invalid rx_ring->type = %d.\n", rx_ring->type);
3255        }
3256        err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3257                           CFG_LCQ, rx_ring->cq_id);
3258        if (err) {
3259                netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3260                return err;
3261        }
3262        return err;
3263}
3264
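    /* Set up the work queue control block (wqicb) for a TX ring, assign
     * its doorbell and shadow registers, and download the control block
     * to the chip.
     */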
3265static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3266{
3267        struct wqicb *wqicb = (struct wqicb *)tx_ring;
3268        void __iomem *doorbell_area =
3269            qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3270        void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3271            (tx_ring->wq_id * sizeof(u64));
3272        u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3273            (tx_ring->wq_id * sizeof(u64));
3274        int err = 0;
3275
3276        /*
3277         * Assign doorbell registers for this tx_ring.
3278         */
3279        /* TX PCI doorbell mem area for tx producer index */
3280        tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3281        tx_ring->prod_idx = 0;
3282        /* TX PCI doorbell mem area + 0x04 */
3283        tx_ring->valid_db_reg = doorbell_area + 0x04;
3284
3285        /*
3286         * Assign shadow registers for this tx_ring.
3287         */
3288        tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3289        tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3290
3291        wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3292        wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3293                                   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3294        wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3295        wqicb->rid = 0;
3296        wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3297
3298        wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3299
3300        ql_init_tx_ring(qdev, tx_ring);
3301
3302        err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3303                           (u16) tx_ring->wq_id);
3304        if (err) {
3305                netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3306                return err;
3307        }
3308        return err;
3309}
3310
3311static void ql_disable_msix(struct ql_adapter *qdev)
3312{
3313        if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3314                pci_disable_msix(qdev->pdev);
3315                clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3316                kfree(qdev->msi_x_entry);
3317                qdev->msi_x_entry = NULL;
3318        } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3319                pci_disable_msi(qdev->pdev);
3320                clear_bit(QL_MSI_ENABLED, &qdev->flags);
3321        }
3322}
3323
3324/* We start by trying to get the number of vectors
3325 * stored in qdev->intr_count. If we don't get that
3326 * many, we run with however many the MSI-X range allocation returns.
3327 */
3328static void ql_enable_msix(struct ql_adapter *qdev)
3329{
3330        int i, err;
3331
3332        /* Get the MSIX vectors. */
3333        if (qlge_irq_type == MSIX_IRQ) {
3334                /* Try to alloc space for the msix struct,
3335                 * if it fails then go to MSI/legacy.
3336                 */
3337                qdev->msi_x_entry = kcalloc(qdev->intr_count,
3338                                            sizeof(struct msix_entry),
3339                                            GFP_KERNEL);
3340                if (!qdev->msi_x_entry) {
3341                        qlge_irq_type = MSI_IRQ;
3342                        goto msi;
3343                }
3344
3345                for (i = 0; i < qdev->intr_count; i++)
3346                        qdev->msi_x_entry[i].entry = i;
3347
3348                err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3349                                            1, qdev->intr_count);
3350                if (err < 0) {
3351                        kfree(qdev->msi_x_entry);
3352                        qdev->msi_x_entry = NULL;
3353                        netif_warn(qdev, ifup, qdev->ndev,
3354                                   "MSI-X Enable failed, trying MSI.\n");
3355                        qlge_irq_type = MSI_IRQ;
3356                } else {
3357                        qdev->intr_count = err;
3358                        set_bit(QL_MSIX_ENABLED, &qdev->flags);
3359                        netif_info(qdev, ifup, qdev->ndev,
3360                                   "MSI-X Enabled, got %d vectors.\n",
3361                                   qdev->intr_count);
3362                        return;
3363                }
3364        }
3365msi:
3366        qdev->intr_count = 1;
3367        if (qlge_irq_type == MSI_IRQ) {
3368                if (!pci_enable_msi(qdev->pdev)) {
3369                        set_bit(QL_MSI_ENABLED, &qdev->flags);
3370                        netif_info(qdev, ifup, qdev->ndev,
3371                                   "Running with MSI interrupts.\n");
3372                        return;
3373                }
3374        }
3375        qlge_irq_type = LEG_IRQ;
3376        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3377                     "Running with legacy interrupts.\n");
3378}
3379
3380/* Each vector services 1 RSS ring and 1 or more
3381 * TX completion rings.  This function loops through
3382 * the TX completion rings and assigns the vector that
3383 * will service it.  An example would be if there are
3384 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3385 * This would mean that vector 0 would service RSS ring 0
3386 * and TX completion rings 0,1,2 and 3.  Vector 1 would
3387 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3388 */
3389static void ql_set_tx_vect(struct ql_adapter *qdev)
3390{
3391        int i, j, vect;
3392        u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3393
3394        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3395                /* Assign irq vectors to TX rx_rings.*/
3396                for (vect = 0, j = 0, i = qdev->rss_ring_count;
3397                                         i < qdev->rx_ring_count; i++) {
3398                        if (j == tx_rings_per_vector) {
3399                                vect++;
3400                                j = 0;
3401                        }
3402                        qdev->rx_ring[i].irq = vect;
3403                        j++;
3404                }
3405        } else {
3406                /* For single vector all rings have an irq
3407                 * of zero.
3408                 */
3409                for (i = 0; i < qdev->rx_ring_count; i++)
3410                        qdev->rx_ring[i].irq = 0;
3411        }
3412}
3413
3414/* Set the interrupt mask for this vector.  Each vector
3415 * will service 1 RSS ring and 1 or more TX completion
3416 * rings.  This function sets up a bit mask per vector
3417 * that indicates which rings it services.
3418 */
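    /* For example (a sketch assuming each ring's cq_id equals its index):
     * with 2 vectors and 8 TX completion rings there are 2 RSS rings, so
     * vector 0 gets a mask of (1 << 0) | (1 << 2) | (1 << 3) | (1 << 4) |
     * (1 << 5) and vector 1 gets (1 << 1) | (1 << 6) | (1 << 7) |
     * (1 << 8) | (1 << 9).
     */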
3419static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3420{
3421        int j, vect = ctx->intr;
3422        u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3423
3424        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3425                /* Add the RSS ring serviced by this vector
3426                 * to the mask.
3427                 */
3428                ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3429                /* Add the TX ring(s) serviced by this vector
3430                 * to the mask. */
3431                for (j = 0; j < tx_rings_per_vector; j++) {
3432                        ctx->irq_mask |=
3433                        (1 << qdev->rx_ring[qdev->rss_ring_count +
3434                        (vect * tx_rings_per_vector) + j].cq_id);
3435                }
3436        } else {
3437                /* For a single vector we just set the bit for
3438                 * each queue's ID in the mask.
3439                 */
3440                for (j = 0; j < qdev->rx_ring_count; j++)
3441                        ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3442        }
3443}
3444
3445/*
3446 * Here we build the intr_context structures based on
3447 * our rx_ring count and intr vector count.
3448 * The intr_context structure is used to hook each vector
3449 * to possibly different handlers.
3450 */
3451static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3452{
3453        int i = 0;
3454        struct intr_context *intr_context = &qdev->intr_context[0];
3455
3456        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3457                /* Each rx_ring has its
3458                 * own intr_context since we have separate
3459                 * vectors for each queue.
3460                 */
3461                for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3462                        qdev->rx_ring[i].irq = i;
3463                        intr_context->intr = i;
3464                        intr_context->qdev = qdev;
3465                        /* Set up this vector's bit-mask that indicates
3466                         * which queues it services.
3467                         */
3468                        ql_set_irq_mask(qdev, intr_context);
3469                        /*
3470                         * We set up each vector's enable/disable/read bits so
3471                         * there are no bit/mask calculations in the critical path.
3472                         */
3473                        intr_context->intr_en_mask =
3474                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3475                            INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3476                            | i;
3477                        intr_context->intr_dis_mask =
3478                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3479                            INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3480                            INTR_EN_IHD | i;
3481                        intr_context->intr_read_mask =
3482                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3483                            INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3484                            i;
3485                        if (i == 0) {
3486                                /* The first vector/queue handles
3487                                 * broadcast/multicast, fatal errors,
3488                                 * and firmware events.  This in addition
3489                                 * to normal inbound NAPI processing.
3490                                 */
3491                                intr_context->handler = qlge_isr;
3492                                sprintf(intr_context->name, "%s-rx-%d",
3493                                        qdev->ndev->name, i);
3494                        } else {
3495                                /*
3496                                 * Inbound queues handle unicast frames only.
3497                                 */
3498                                intr_context->handler = qlge_msix_rx_isr;
3499                                sprintf(intr_context->name, "%s-rx-%d",
3500                                        qdev->ndev->name, i);
3501                        }
3502                }
3503        } else {
3504                /*
3505                 * All rx_rings use the same intr_context since
3506                 * there is only one vector.
3507                 */
3508                intr_context->intr = 0;
3509                intr_context->qdev = qdev;
3510                /*
3511                 * We set up each vector's enable/disable/read bits so
3512                 * there are no bit/mask calculations in the critical path.
3513                 */
3514                intr_context->intr_en_mask =
3515                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3516                intr_context->intr_dis_mask =
3517                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3518                    INTR_EN_TYPE_DISABLE;
3519                intr_context->intr_read_mask =
3520                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3521                /*
3522                 * Single interrupt means one handler for all rings.
3523                 */
3524                intr_context->handler = qlge_isr;
3525                sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3526                /* Set up this vector's bit-mask that indicates
3527                 * which queues it services. In this case there is
3528                 * a single vector so it will service all RSS and
3529                 * TX completion rings.
3530                 */
3531                ql_set_irq_mask(qdev, intr_context);
3532        }
3533        /* Tell the TX completion rings which MSIx vector
3534         * they will be using.
3535         */
3536        ql_set_tx_vect(qdev);
3537}
3538
3539static void ql_free_irq(struct ql_adapter *qdev)
3540{
3541        int i;
3542        struct intr_context *intr_context = &qdev->intr_context[0];
3543
3544        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3545                if (intr_context->hooked) {
3546                        if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3547                                free_irq(qdev->msi_x_entry[i].vector,
3548                                         &qdev->rx_ring[i]);
3549                        } else {
3550                                free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3551                        }
3552                }
3553        }
3554        ql_disable_msix(qdev);
3555}
3556
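    /* Hook an interrupt handler for every vector.  With MSI-X each vector
     * gets its own rx_ring as the dev_id; with MSI or legacy interrupts a
     * single handler services rx_ring[0].
     */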
3557static int ql_request_irq(struct ql_adapter *qdev)
3558{
3559        int i;
3560        int status = 0;
3561        struct pci_dev *pdev = qdev->pdev;
3562        struct intr_context *intr_context = &qdev->intr_context[0];
3563
3564        ql_resolve_queues_to_irqs(qdev);
3565
3566        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3567                atomic_set(&intr_context->irq_cnt, 0);
3568                if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3569                        status = request_irq(qdev->msi_x_entry[i].vector,
3570                                             intr_context->handler,
3571                                             0,
3572                                             intr_context->name,
3573                                             &qdev->rx_ring[i]);
3574                        if (status) {
3575                                netif_err(qdev, ifup, qdev->ndev,
3576                                          "Failed request for MSIX interrupt %d.\n",
3577                                          i);
3578                                goto err_irq;
3579                        }
3580                } else {
3581                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3582                                     "trying msi or legacy interrupts.\n");
3583                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3584                                     "%s: irq = %d.\n", __func__, pdev->irq);
3585                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3586                                     "%s: context->name = %s.\n", __func__,
3587                                     intr_context->name);
3588                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3589                                     "%s: dev_id = 0x%p.\n", __func__,
3590                                     &qdev->rx_ring[0]);
3591                        status = request_irq(pdev->irq, qlge_isr,
3592                                             test_bit(QL_MSI_ENABLED,
3593                                                      &qdev->flags) ?
3594                                             0 : IRQF_SHARED,
3595                                             intr_context->name,
3596                                             &qdev->rx_ring[0]);
3597                        if (status)
3598                                goto err_irq;
3599
3600                        netif_err(qdev, ifup, qdev->ndev,
3601                                  "Hooked intr %d, queue type %s, with name %s.\n",
3602                                  i,
3603                                  qdev->rx_ring[0].type == DEFAULT_Q ?
3604                                  "DEFAULT_Q" :
3605                                  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3606                                  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3607                                  intr_context->name);
3608                }
3609                intr_context->hooked = 1;
3610        }
3611        return status;
3612err_irq:
3613        netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3614        ql_free_irq(qdev);
3615        return status;
3616}
3617
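    /* Build and download the RSS initialization control block (RICB):
     * the hash keys plus a 1024-entry indirection table that spreads
     * flows across the RSS completion queues.
     */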
3618static int ql_start_rss(struct ql_adapter *qdev)
3619{
3620        static const u8 init_hash_seed[] = {
3621                0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3622                0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3623                0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3624                0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3625                0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3626        };
3627        struct ricb *ricb = &qdev->ricb;
3628        int status = 0;
3629        int i;
3630        u8 *hash_id = (u8 *) ricb->hash_cq_id;
3631
3632        memset((void *)ricb, 0, sizeof(*ricb));
3633
3634        ricb->base_cq = RSS_L4K;
3635        ricb->flags =
3636                (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3637        ricb->mask = cpu_to_le16((u16)(0x3ff));
3638
3639        /*
3640         * Fill out the Indirection Table.
3641         */
3642        for (i = 0; i < 1024; i++)
3643                hash_id[i] = (i & (qdev->rss_ring_count - 1));
3644
3645        memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3646        memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3647
3648        status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3649        if (status) {
3650                netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3651                return status;
3652        }
3653        return status;
3654}
3655
3656static int ql_clear_routing_entries(struct ql_adapter *qdev)
3657{
3658        int i, status = 0;
3659
3660        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3661        if (status)
3662                return status;
3663        /* Clear all the entries in the routing table. */
3664        for (i = 0; i < 16; i++) {
3665                status = ql_set_routing_reg(qdev, i, 0, 0);
3666                if (status) {
3667                        netif_err(qdev, ifup, qdev->ndev,
3668                                  "Failed to init routing register for CAM packets.\n");
3669                        break;
3670                }
3671        }
3672        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3673        return status;
3674}
3675
3676/* Initialize the frame-to-queue routing. */
3677static int ql_route_initialize(struct ql_adapter *qdev)
3678{
3679        int status = 0;
3680
3681        /* Clear all the entries in the routing table. */
3682        status = ql_clear_routing_entries(qdev);
3683        if (status)
3684                return status;
3685
3686        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3687        if (status)
3688                return status;
3689
3690        status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3691                                                RT_IDX_IP_CSUM_ERR, 1);
3692        if (status) {
3693                netif_err(qdev, ifup, qdev->ndev,
3694                          "Failed to init routing register for IP CSUM error packets.\n");
3696                goto exit;
3697        }
3698        status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3699                                                RT_IDX_TU_CSUM_ERR, 1);
3700        if (status) {
3701                netif_err(qdev, ifup, qdev->ndev,
3702                          "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3704                goto exit;
3705        }
3706        status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3707        if (status) {
3708                netif_err(qdev, ifup, qdev->ndev,
3709                          "Failed to init routing register for broadcast packets.\n");
3710                goto exit;
3711        }
3712        /* If we have more than one inbound queue, then turn on RSS in the
3713         * routing block.
3714         */
3715        if (qdev->rss_ring_count > 1) {
3716                status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3717                                        RT_IDX_RSS_MATCH, 1);
3718                if (status) {
3719                        netif_err(qdev, ifup, qdev->ndev,
3720                                  "Failed to init routing register for MATCH RSS packets.\n");
3721                        goto exit;
3722                }
3723        }
3724
3725        status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3726                                    RT_IDX_CAM_HIT, 1);
3727        if (status)
3728                netif_err(qdev, ifup, qdev->ndev,
3729                          "Failed to init routing register for CAM packets.\n");
3730exit:
3731        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3732        return status;
3733}
3734
3735int ql_cam_route_initialize(struct ql_adapter *qdev)
3736{
3737        int status, set;
3738
3739        /* Check if the link is up and use that to
3740         * determine whether we are setting or clearing
3741         * the MAC address in the CAM.
3742         */
3743        set = ql_read32(qdev, STS);
3744        set &= qdev->port_link_up;
3745        status = ql_set_mac_addr(qdev, set);
3746        if (status) {
3747                netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3748                return status;
3749        }
3750
3751        status = ql_route_initialize(qdev);
3752        if (status)
3753                netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3754
3755        return status;
3756}
3757
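    /* Bring the chip to an operational state: program the global registers,
     * start every RX and TX ring, load RSS when more than one inbound queue
     * is in use, initialize the port and CAM/routing tables, then enable
     * NAPI on the RSS rings.
     */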
3758static int ql_adapter_initialize(struct ql_adapter *qdev)
3759{
3760        u32 value, mask;
3761        int i;
3762        int status = 0;
3763
3764        /*
3765         * Set up the System register to halt on errors.
3766         */
3767        value = SYS_EFE | SYS_FAE;
3768        mask = value << 16;
3769        ql_write32(qdev, SYS, mask | value);
3770
3771        /* Set the default queue, and VLAN behavior. */
3772        value = NIC_RCV_CFG_DFQ;
3773        mask = NIC_RCV_CFG_DFQ_MASK;
3774        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3775                value |= NIC_RCV_CFG_RV;
3776                mask |= (NIC_RCV_CFG_RV << 16);
3777        }
3778        ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3779
3780        /* Set the MPI interrupt to enabled. */
3781        ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3782
3783        /* Enable the function, set pagesize, enable error checking. */
3784        value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3785            FSC_EC | FSC_VM_PAGE_4K;
3786        value |= SPLT_SETTING;
3787
3788        /* Set/clear header splitting. */
3789        mask = FSC_VM_PAGESIZE_MASK |
3790            FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3791        ql_write32(qdev, FSC, mask | value);
3792
3793        ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3794
3795        /* Set RX packet routing to use the port/PCI function on which
3796         * the packet arrived, in addition to the usual frame routing.
3797         * This is helpful with bonding, where both interfaces can have
3798         * the same MAC address.
3799         */
3800        ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3801        /* Reroute all packets to our Interface.
3802         * They may have been routed to MPI firmware
3803         * due to WOL.
3804         */
3805        value = ql_read32(qdev, MGMT_RCV_CFG);
3806        value &= ~MGMT_RCV_CFG_RM;
3807        mask = 0xffff0000;
3808
3809        /* Sticky reg needs clearing due to WOL. */
3810        ql_write32(qdev, MGMT_RCV_CFG, mask);
3811        ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3812
3813        /* Default WOL is enabled on Mezz cards */
3814        if (qdev->pdev->subsystem_device == 0x0068 ||
3815                        qdev->pdev->subsystem_device == 0x0180)
3816                qdev->wol = WAKE_MAGIC;
3817
3818        /* Start up the rx queues. */
3819        for (i = 0; i < qdev->rx_ring_count; i++) {
3820                status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3821                if (status) {
3822                        netif_err(qdev, ifup, qdev->ndev,
3823                                  "Failed to start rx ring[%d].\n", i);
3824                        return status;
3825                }
3826        }
3827
3828        /* If there is more than one inbound completion queue
3829         * then download a RICB to configure RSS.
3830         */
3831        if (qdev->rss_ring_count > 1) {
3832                status = ql_start_rss(qdev);
3833                if (status) {
3834                        netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3835                        return status;
3836                }
3837        }
3838
3839        /* Start up the tx queues. */
3840        for (i = 0; i < qdev->tx_ring_count; i++) {
3841                status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3842                if (status) {
3843                        netif_err(qdev, ifup, qdev->ndev,
3844                                  "Failed to start tx ring[%d].\n", i);
3845                        return status;
3846                }
3847        }
3848
3849        /* Initialize the port and set the max framesize. */
3850        status = qdev->nic_ops->port_initialize(qdev);
3851        if (status)
3852                netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3853
3854        /* Set up the MAC address and frame routing filter. */
3855        status = ql_cam_route_initialize(qdev);
3856        if (status) {
3857                netif_err(qdev, ifup, qdev->ndev,
3858                          "Failed to init CAM/Routing tables.\n");
3859                return status;
3860        }
3861
3862        /* Start NAPI for the RSS queues. */
3863        for (i = 0; i < qdev->rss_ring_count; i++)
3864                napi_enable(&qdev->rx_ring[i].napi);
3865
3866        return status;
3867}
3868
3869/* Issue soft reset to chip. */
3870static int ql_adapter_reset(struct ql_adapter *qdev)
3871{
3872        u32 value;
3873        int status = 0;
3874        unsigned long end_jiffies;
3875
3876        /* Clear all the entries in the routing table. */
3877        status = ql_clear_routing_entries(qdev);
3878        if (status) {
3879                netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3880                return status;
3881        }
3882
3883        /* If the ASIC recovery bit is set, skip the mailbox command and
3884         * just clear the bit; otherwise we are in the normal reset process.
3885         */
3886        if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3887                /* Stop management traffic. */
3888                ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3889
3890                /* Wait for the NIC and MGMNT FIFOs to empty. */
3891                ql_wait_fifo_empty(qdev);
3892        } else
3893                clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3894
3895        ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3896
3897        end_jiffies = jiffies + usecs_to_jiffies(30);
3898        do {
3899                value = ql_read32(qdev, RST_FO);
3900                if ((value & RST_FO_FR) == 0)
3901                        break;
3902                cpu_relax();
3903        } while (time_before(jiffies, end_jiffies));
3904
3905        if (value & RST_FO_FR) {
3906                netif_err(qdev, ifdown, qdev->ndev,
3907                          "ETIMEDOUT!!! errored out of resetting the chip!\n");
3908                status = -ETIMEDOUT;
3909        }
3910
3911        /* Resume management traffic. */
3912        ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3913        return status;
3914}
3915
3916static void ql_display_dev_info(struct net_device *ndev)
3917{
3918        struct ql_adapter *qdev = netdev_priv(ndev);
3919
3920        netif_info(qdev, probe, qdev->ndev,
3921                   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3922                   "XG Roll = %d, XG Rev = %d.\n",
3923                   qdev->func,
3924                   qdev->port,
3925                   qdev->chip_rev_id & 0x0000000f,
3926                   qdev->chip_rev_id >> 4 & 0x0000000f,
3927                   qdev->chip_rev_id >> 8 & 0x0000000f,
3928                   qdev->chip_rev_id >> 12 & 0x0000000f);
3929        netif_info(qdev, probe, qdev->ndev,
3930                   "MAC address %pM\n", ndev->dev_addr);
3931}
3932
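    /* Program the firmware's wake-on-LAN mode to match the ethtool
     * settings; only magic-packet wake is supported.
     */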
3933static int ql_wol(struct ql_adapter *qdev)
3934{
3935        int status = 0;
3936        u32 wol = MB_WOL_DISABLE;
3937
3938        /* The CAM is still intact after a reset, but if we
3939         * are doing WOL, then we may need to program the
3940         * routing regs. We would also need to issue the mailbox
3941         * commands to instruct the MPI what to do per the ethtool
3942         * settings.
3943         */
3944
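            /* Only magic-packet wake is implemented below; any other
             * requested wake flag is rejected.
             */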
3945        if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3946                        WAKE_MCAST | WAKE_BCAST)) {
3947                netif_err(qdev, ifdown, qdev->ndev,
3948                          "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3949                          qdev->wol);
3950                return -EINVAL;
3951        }
3952
3953        if (qdev->wol & WAKE_MAGIC) {
3954                status = ql_mb_wol_set_magic(qdev, 1);
3955                if (status) {
3956                        netif_err(qdev, ifdown, qdev->ndev,
3957                                  "Failed to set magic packet on %s.\n",
3958                                  qdev->ndev->name);
3959                        return status;
3960                }
3961                netif_info(qdev, drv, qdev->ndev,
3962                           "Enabled magic packet successfully on %s.\n",
3963                           qdev->ndev->name);
3964
3965                wol |= MB_WOL_MAGIC_PKT;
3966        }
3967
3968        if (qdev->wol) {
3969                wol |= MB_WOL_MODE_ON;
3970                status = ql_mb_wol_mode(qdev, wol);
3971                netif_err(qdev, drv, qdev->ndev,
3972                          "WOL %s (wol code 0x%x) on %s\n",
3973                          (status == 0) ? "Successfully set" : "Failed",
3974                          wol, qdev->ndev->name);
3975        }
3976
3977        return status;
3978}
3979
3980static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3981{
3982
3983        /* Don't kill the reset worker thread if we
3984         * are in the process of recovery.
3985         */
3986        if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3987                cancel_delayed_work_sync(&qdev->asic_reset_work);
3988        cancel_delayed_work_sync(&qdev->mpi_reset_work);
3989        cancel_delayed_work_sync(&qdev->mpi_work);
3990        cancel_delayed_work_sync(&qdev->mpi_idc_work);
3991        cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3992        cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3993}
3994
3995static int ql_adapter_down(struct ql_adapter *qdev)
3996{
3997        int i, status = 0;
3998
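            /* Tear down in order: drop the link, stop the delayed workers,
             * quiesce NAPI, disable interrupts, then clean the rings and
             * soft-reset the chip.
             */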
3999        ql_link_off(qdev);
4000
4001        ql_cancel_all_work_sync(qdev);
4002
4003        for (i = 0; i < qdev->rss_ring_count; i++)
4004                napi_disable(&qdev->rx_ring[i].napi);
4005
4006        clear_bit(QL_ADAPTER_UP, &qdev->flags);
4007
4008        ql_disable_interrupts(qdev);
4009
4010        ql_tx_ring_clean(qdev);
4011
4012        /* Call netif_napi_del() from common point.
4013         */
4014        for (i = 0; i < qdev->rss_ring_count; i++)
4015                netif_napi_del(&qdev->rx_ring[i].napi);
4016
4017        status = ql_adapter_reset(qdev);
4018        if (status)
4019                netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4020                          qdev->func);
4021        ql_free_rx_buffers(qdev);
4022
4023        return status;
4024}
4025
4026static int ql_adapter_up(struct ql_adapter *qdev)
4027{
4028        int err = 0;
4029
4030        err = ql_adapter_initialize(qdev);
4031        if (err) {
4032                netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4033                goto err_init;
4034        }
4035        set_bit(QL_ADAPTER_UP, &qdev->flags);
4036        ql_alloc_rx_buffers(qdev);
4037        /* If the port is initialized and the
4038         * link is up then turn on the carrier.
4039         */
4040        if ((ql_read32(qdev, STS) & qdev->port_init) &&
4041                        (ql_read32(qdev, STS) & qdev->port_link_up))
4042                ql_link_on(qdev);
4043        /* Restore rx mode. */
4044        clear_bit(QL_ALLMULTI, &qdev->flags);
4045        clear_bit(QL_PROMISCUOUS, &qdev->flags);
4046        qlge_set_multicast_list(qdev->ndev);
4047
4048        /* Restore vlan setting. */
4049        qlge_restore_vlan(qdev);
4050
4051        ql_enable_interrupts(qdev);
4052        ql_enable_all_completion_interrupts(qdev);
4053        netif_tx_start_all_queues(qdev->ndev);
4054
4055        return 0;
4056err_init:
4057        ql_adapter_reset(qdev);
4058        return err;
4059}
4060
4061static void ql_release_adapter_resources(struct ql_adapter *qdev)
4062{
4063        ql_free_mem_resources(qdev);
4064        ql_free_irq(qdev);
4065}
4066
4067static int ql_get_adapter_resources(struct ql_adapter *qdev)
4068{
4069        int status = 0;
4070
4071        if (ql_alloc_mem_resources(qdev)) {
4072                netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4073                return -ENOMEM;
4074        }
4075        status = ql_request_irq(qdev);
4076        return status;
4077}
4078
4079static int qlge_close(struct net_device *ndev)
4080{
4081        struct ql_adapter *qdev = netdev_priv(ndev);
4082
4083        /* If we hit the pci_channel_io_perm_failure
4084         * condition, then we have already
4085         * brought the adapter down.
4086         */
4087        if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4088                netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4089                clear_bit(QL_EEH_FATAL, &qdev->flags);
4090                return 0;
4091        }
4092
4093        /*
4094         * Wait for device to recover from a reset.
4095         * (Rarely happens, but possible.)
4096         */
4097        while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4098                msleep(1);
4099        ql_adapter_down(qdev);
4100        ql_release_adapter_resources(qdev);
4101        return 0;
4102}
4103
4104static int ql_configure_rings(struct ql_adapter *qdev)
4105{
4106        int i;
4107        struct rx_ring *rx_ring;
4108        struct tx_ring *tx_ring;
4109        int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4110        unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4111                LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4112
4113        qdev->lbq_buf_order = get_order(lbq_buf_len);
4114
4115        /* In a perfect world we have one RSS ring for each CPU
4116         * and each has its own vector.  To do that we ask for
4117         * cpu_cnt vectors.  ql_enable_msix() will adjust the
4118         * vector count to what we actually get.  We then
4119         * allocate an RSS ring for each.
4120         * Essentially, we are doing min(cpu_count, msix_vector_count).
4121         */
4122        qdev->intr_count = cpu_cnt;
4123        ql_enable_msix(qdev);
4124        /* Adjust the RSS ring count to the actual vector count. */
4125        qdev->rss_ring_count = qdev->intr_count;
4126        qdev->tx_ring_count = cpu_cnt;
4127        qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4128
4129        for (i = 0; i < qdev->tx_ring_count; i++) {
4130                tx_ring = &qdev->tx_ring[i];
4131                memset((void *)tx_ring, 0, sizeof(*tx_ring));
4132                tx_ring->qdev = qdev;
4133                tx_ring->wq_id = i;
4134                tx_ring->wq_len = qdev->tx_ring_size;
4135                tx_ring->wq_size =
4136                    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4137
4138                /*
4139                 * The completion queue IDs for the tx rings start
4140                 * immediately after the rss rings.
4141                 */
4142                tx_ring->cq_id = qdev->rss_ring_count + i;
4143        }
4144
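            /* rx_ring[] holds the inbound RSS completion queues first
             * (indices 0..rss_ring_count-1), followed by one outbound
             * completion queue per tx ring.
             */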
4145        for (i = 0; i < qdev->rx_ring_count; i++) {
4146                rx_ring = &qdev->rx_ring[i];
4147                memset((void *)rx_ring, 0, sizeof(*rx_ring));
4148                rx_ring->qdev = qdev;
4149                rx_ring->cq_id = i;
4150                rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4151                if (i < qdev->rss_ring_count) {
4153                        /*
4154                         * Inbound (RSS) queues.
4155                         */
4156                        rx_ring->cq_len = qdev->rx_ring_size;
4157                        rx_ring->cq_size =
4158                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4159                        rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4160                        rx_ring->lbq_size =
4161                            rx_ring->lbq_len * sizeof(__le64);
4162                        rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4163                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4164                        rx_ring->sbq_size =
4165                            rx_ring->sbq_len * sizeof(__le64);
4166                        rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4167                        rx_ring->type = RX_Q;
4168                } else {
4170                        /*
4171                         * Outbound queue handles outbound completions only.
4172                         */
4173                        /* outbound cq is same size as tx_ring it services. */
4174                        rx_ring->cq_len = qdev->tx_ring_size;
4175                        rx_ring->cq_size =
4176                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4177                        rx_ring->lbq_len = 0;
4178                        rx_ring->lbq_size = 0;
4179                        rx_ring->lbq_buf_size = 0;
4180                        rx_ring->sbq_len = 0;
4181                        rx_ring->sbq_size = 0;
4182                        rx_ring->sbq_buf_size = 0;
4183                        rx_ring->type = TX_Q;
4184                }
4185        }
4186        return 0;
4187}
4188
4189static int qlge_open(struct net_device *ndev)
4190{
4191        int err = 0;
4192        struct ql_adapter *qdev = netdev_priv(ndev);
4193
4194        err = ql_adapter_reset(qdev);
4195        if (err)
4196                return err;
4197
4198        err = ql_configure_rings(qdev);
4199        if (err)
4200                return err;
4201
4202        err = ql_get_adapter_resources(qdev);
4203        if (err)
4204                goto error_up;
4205
4206        err = ql_adapter_up(qdev);
4207        if (err)
4208                goto error_up;
4209
4210        return err;
4211
4212error_up:
4213        ql_release_adapter_resources(qdev);
4214        return err;
4215}
4216
4217static int ql_change_rx_buffers(struct ql_adapter *qdev)
4218{
4219        struct rx_ring *rx_ring;
4220        int i, status;
4221        u32 lbq_buf_len;
4222
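            /* Changing the large-buffer size requires a full down/up
             * cycle: bring the adapter down, update lbq_buf_size on each
             * RSS ring, then bring it back up.
             */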
4223        /* Wait for an outstanding reset to complete. */
4224        if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4225                int i = 4;
4226
4227                while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4228                        netif_err(qdev, ifup, qdev->ndev,
4229                                  "Waiting for adapter UP...\n");
4230                        ssleep(1);
4231                }
4232
4233                if (!i) {
4234                        netif_err(qdev, ifup, qdev->ndev,
4235                                  "Timed out waiting for adapter UP\n");
4236                        return -ETIMEDOUT;
4237                }
4238        }
4239
4240        status = ql_adapter_down(qdev);
4241        if (status)
4242                goto error;
4243
4244        /* Get the new rx buffer size. */
4245        lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4246                LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4247        qdev->lbq_buf_order = get_order(lbq_buf_len);
4248
4249        for (i = 0; i < qdev->rss_ring_count; i++) {
4250                rx_ring = &qdev->rx_ring[i];
4251                /* Set the new size. */
4252                rx_ring->lbq_buf_size = lbq_buf_len;
4253        }
4254
4255        status = ql_adapter_up(qdev);
4256        if (status)
4257                goto error;
4258
4259        return status;
4260error:
4261        netif_alert(qdev, ifup, qdev->ndev,
4262                    "Driver up/down cycle failed, closing device.\n");
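            /* Mark the adapter UP so qlge_close() doesn't block waiting
             * for a recovery that will never complete.
             */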
4263        set_bit(QL_ADAPTER_UP, &qdev->flags);
4264        dev_close(qdev->ndev);
4265        return status;
4266}
4267
4268static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4269{
4270        struct ql_adapter *qdev = netdev_priv(ndev);
4271        int status;
4272
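            /* Only the standard (1500) and jumbo (9000) MTUs are
             * supported, and only a direct switch between the two.
             */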
4273        if (ndev->mtu == 1500 && new_mtu == 9000) {
4274                netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4275        } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4276                netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4277        } else
4278                return -EINVAL;
4279
4280        queue_delayed_work(qdev->workqueue,
4281                        &qdev->mpi_port_cfg_work, 3*HZ);
4282
4283        ndev->mtu = new_mtu;
4284
4285        if (!netif_running(qdev->ndev)) {
4286                return 0;
4287        }
4288
4289        status = ql_change_rx_buffers(qdev);
4290        if (status) {
4291                netif_err(qdev, ifup, qdev->ndev,
4292                          "Changing MTU failed.\n");
4293        }
4294
4295        return status;
4296}
4297
4298static struct net_device_stats *qlge_get_stats(struct net_device
4299                                               *ndev)
4300{
4301        struct ql_adapter *qdev = netdev_priv(ndev);
4302        struct rx_ring *rx_ring = &qdev->rx_ring[0];
4303        struct tx_ring *tx_ring = &qdev->tx_ring[0];
4304        unsigned long pkts, mcast, dropped, errors, bytes;
4305        int i;
4306
4307        /* Get RX stats. */
4308        pkts = mcast = dropped = errors = bytes = 0;
4309        for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4310                pkts += rx_ring->rx_packets;
4311                bytes += rx_ring->rx_bytes;
4312                dropped += rx_ring->rx_dropped;
4313                errors += rx_ring->rx_errors;
4314                mcast += rx_ring->rx_multicast;
4315        }
4316        ndev->stats.rx_packets = pkts;
4317        ndev->stats.rx_bytes = bytes;
4318        ndev->stats.rx_dropped = dropped;
4319        ndev->stats.rx_errors = errors;
4320        ndev->stats.multicast = mcast;
4321
4322        /* Get TX stats. */
4323        pkts = errors = bytes = 0;
4324        for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4325                pkts += tx_ring->tx_packets;
4326                bytes += tx_ring->tx_bytes;
4327                errors += tx_ring->tx_errors;
4328        }
4329        ndev->stats.tx_packets = pkts;
4330        ndev->stats.tx_bytes = bytes;
4331        ndev->stats.tx_errors = errors;
4332        return &ndev->stats;
4333}
4334
4335static void qlge_set_multicast_list(struct net_device *ndev)
4336{
4337        struct ql_adapter *qdev = netdev_priv(ndev);
4338        struct netdev_hw_addr *ha;
4339        int i, status;
4340
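            /* Routing register updates below are done while holding the
             * SEM_RT_IDX_MASK hardware semaphore; CAM writes additionally
             * take SEM_MAC_ADDR_MASK.
             */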
4341        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4342        if (status)
4343                return;
4344        /*
4345         * Set or clear promiscuous mode if a
4346         * transition is taking place.
4347         */
4348        if (ndev->flags & IFF_PROMISC) {
4349                if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4350                        if (ql_set_routing_reg
4351                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4352                                netif_err(qdev, hw, qdev->ndev,
4353                                          "Failed to set promiscuous mode.\n");
4354                        } else {
4355                                set_bit(QL_PROMISCUOUS, &qdev->flags);
4356                        }
4357                }
4358        } else {
4359                if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4360                        if (ql_set_routing_reg
4361                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4362                                netif_err(qdev, hw, qdev->ndev,
4363                                          "Failed to clear promiscuous mode.\n");
4364                        } else {
4365                                clear_bit(QL_PROMISCUOUS, &qdev->flags);
4366                        }
4367                }
4368        }
4369
4370        /*
4371         * Set or clear all multicast mode if a
4372         * transition is taking place.
4373         */
4374        if ((ndev->flags & IFF_ALLMULTI) ||
4375            (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4376                if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4377                        if (ql_set_routing_reg
4378                            (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4379                                netif_err(qdev, hw, qdev->ndev,
4380                                          "Failed to set all-multi mode.\n");
4381                        } else {
4382                                set_bit(QL_ALLMULTI, &qdev->flags);
4383                        }
4384                }
4385        } else {
4386                if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4387                        if (ql_set_routing_reg
4388                            (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4389                                netif_err(qdev, hw, qdev->ndev,
4390                                          "Failed to clear all-multi mode.\n");
4391                        } else {
4392                                clear_bit(QL_ALLMULTI, &qdev->flags);
4393                        }
4394                }
4395        }
4396
4397        if (!netdev_mc_empty(ndev)) {
4398                status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4399                if (status)
4400                        goto exit;
4401                i = 0;
4402                netdev_for_each_mc_addr(ha, ndev) {
4403                        if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4404                                                MAC_ADDR_TYPE_MULTI_MAC, i)) {
4405                                netif_err(qdev, hw, qdev->ndev,
4406                                          "Failed to load multicast address.\n");
4407                                ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4408                                goto exit;
4409                        }
4410                        i++;
4411                }
4412                ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4413                if (ql_set_routing_reg
4414                    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4415                        netif_err(qdev, hw, qdev->ndev,
4416                                  "Failed to set multicast match mode.\n");
4417                } else {
4418                        set_bit(QL_ALLMULTI, &qdev->flags);
4419                }
4420        }
4421exit:
4422        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4423}
4424
4425static int qlge_set_mac_address(struct net_device *ndev, void *p)
4426{
4427        struct ql_adapter *qdev = netdev_priv(ndev);
4428        struct sockaddr *addr = p;
4429        int status;
4430
4431        if (!is_valid_ether_addr(addr->sa_data))
4432                return -EADDRNOTAVAIL;
4433        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4434        /* Update local copy of current mac address. */
4435        memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4436
4437        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4438        if (status)
4439                return status;
4440        status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4441                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4442        if (status)
4443                netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4444        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4445        return status;
4446}
4447
4448static void qlge_tx_timeout(struct net_device *ndev)
4449{
4450        struct ql_adapter *qdev = netdev_priv(ndev);
4451        ql_queue_asic_error(qdev);
4452}
4453
4454static void ql_asic_reset_work(struct work_struct *work)
4455{
4456        struct ql_adapter *qdev =
4457            container_of(work, struct ql_adapter, asic_reset_work.work);
4458        int status;
4459        rtnl_lock();
4460        status = ql_adapter_down(qdev);
4461        if (status)
4462                goto error;
4463
4464        status = ql_adapter_up(qdev);
4465        if (status)
4466                goto error;
4467
4468        /* Restore rx mode. */
4469        clear_bit(QL_ALLMULTI, &qdev->flags);
4470        clear_bit(QL_PROMISCUOUS, &qdev->flags);
4471        qlge_set_multicast_list(qdev->ndev);
4472
4473        rtnl_unlock();
4474        return;
4475error:
4476        netif_alert(qdev, ifup, qdev->ndev,
4477                    "Driver up/down cycle failed, closing device\n");
4478
4479        set_bit(QL_ADAPTER_UP, &qdev->flags);
4480        dev_close(qdev->ndev);
4481        rtnl_unlock();
4482}
4483
4484static const struct nic_operations qla8012_nic_ops = {
4485        .get_flash              = ql_get_8012_flash_params,
4486        .port_initialize        = ql_8012_port_initialize,
4487};
4488
4489static const struct nic_operations qla8000_nic_ops = {
4490        .get_flash              = ql_get_8000_flash_params,
4491        .port_initialize        = ql_8000_port_initialize,
4492};
4493
4494/* Find the pcie function number for the other NIC
4495 * on this chip.  Since both NIC functions share a
4496 * common firmware we have the lowest enabled function
4497 * do any common work.  Examples would be resetting
4498 * after a fatal firmware error, or doing a firmware
4499 * coredump.
4500 */
4501static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4502{
4503        int status = 0;
4504        u32 temp;
4505        u32 nic_func1, nic_func2;
4506
4507        status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4508                        &temp);
4509        if (status)
4510                return status;
4511
4512        nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4513                        MPI_TEST_NIC_FUNC_MASK);
4514        nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4515                        MPI_TEST_NIC_FUNC_MASK);
4516
4517        if (qdev->func == nic_func1)
4518                qdev->alt_func = nic_func2;
4519        else if (qdev->func == nic_func2)
4520                qdev->alt_func = nic_func1;
4521        else
4522                status = -EIO;
4523
4524        return status;
4525}
4526
4527static int ql_get_board_info(struct ql_adapter *qdev)
4528{
4529        int status;
4530        qdev->func =
4531            (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4532        if (qdev->func > 3)
4533                return -EIO;
4534
4535        status = ql_get_alt_pcie_func(qdev);
4536        if (status)
4537                return status;
4538
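            /* The lower-numbered of the two NIC functions is port 0. */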
4539        qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4540        if (qdev->port) {
4541                qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4542                qdev->port_link_up = STS_PL1;
4543                qdev->port_init = STS_PI1;
4544                qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4545                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4546        } else {
4547                qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4548                qdev->port_link_up = STS_PL0;
4549                qdev->port_init = STS_PI0;
4550                qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4551                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4552        }
4553        qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4554        qdev->device_id = qdev->pdev->device;
4555        if (qdev->device_id == QLGE_DEVICE_ID_8012)
4556                qdev->nic_ops = &qla8012_nic_ops;
4557        else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4558                qdev->nic_ops = &qla8000_nic_ops;
4559        return status;
4560}
4561
4562static void ql_release_all(struct pci_dev *pdev)
4563{
4564        struct net_device *ndev = pci_get_drvdata(pdev);
4565        struct ql_adapter *qdev = netdev_priv(ndev);
4566
4567        if (qdev->workqueue) {
4568                destroy_workqueue(qdev->workqueue);
4569                qdev->workqueue = NULL;
4570        }
4571
4572        if (qdev->reg_base)
4573                iounmap(qdev->reg_base);
4574        if (qdev->doorbell_area)
4575                iounmap(qdev->doorbell_area);
4576        vfree(qdev->mpi_coredump);
4577        pci_release_regions(pdev);
4578}
4579
4580static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4581                          int cards_found)
4582{
4583        struct ql_adapter *qdev = netdev_priv(ndev);
4584        int err = 0;
4585
4586        memset((void *)qdev, 0, sizeof(*qdev));
4587        err = pci_enable_device(pdev);
4588        if (err) {
4589                dev_err(&pdev->dev, "PCI device enable failed.\n");
4590                return err;
4591        }
4592
4593        qdev->ndev = ndev;
4594        qdev->pdev = pdev;
4595        pci_set_drvdata(pdev, ndev);
4596
4597        /* Set PCIe read request size */
4598        err = pcie_set_readrq(pdev, 4096);
4599        if (err) {
4600                dev_err(&pdev->dev, "Set readrq failed.\n");
4601                goto err_out1;
4602        }
4603
4604        err = pci_request_regions(pdev, DRV_NAME);
4605        if (err) {
4606                dev_err(&pdev->dev, "PCI region request failed.\n");
4607                goto err_out1;
4608        }
4609
4610        pci_set_master(pdev);
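            /* Prefer a 64-bit DMA mask; fall back to 32-bit if that fails. */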
4611        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4612                set_bit(QL_DMA64, &qdev->flags);
4613                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4614        } else {
4615                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4616                if (!err)
4617                       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4618        }
4619
4620        if (err) {
4621                dev_err(&pdev->dev, "No usable DMA configuration.\n");
4622                goto err_out2;
4623        }
4624
4625        /* Set PCIe reset type for EEH to fundamental. */
4626        pdev->needs_freset = 1;
4627        pci_save_state(pdev);
4628        qdev->reg_base =
4629            ioremap_nocache(pci_resource_start(pdev, 1),
4630                            pci_resource_len(pdev, 1));
4631        if (!qdev->reg_base) {
4632                dev_err(&pdev->dev, "Register mapping failed.\n");
4633                err = -ENOMEM;
4634                goto err_out2;
4635        }
4636
4637        qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4638        qdev->doorbell_area =
4639            ioremap_nocache(pci_resource_start(pdev, 3),
4640                            pci_resource_len(pdev, 3));
4641        if (!qdev->doorbell_area) {
4642                dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4643                err = -ENOMEM;
4644                goto err_out2;
4645        }
4646
4647        err = ql_get_board_info(qdev);
4648        if (err) {
4649                dev_err(&pdev->dev, "Register access failed.\n");
4650                err = -EIO;
4651                goto err_out2;
4652        }
4653        qdev->msg_enable = netif_msg_init(debug, default_msg);
4654        spin_lock_init(&qdev->hw_lock);
4655        spin_lock_init(&qdev->stats_lock);
4656
4657        if (qlge_mpi_coredump) {
4658                qdev->mpi_coredump =
4659                        vmalloc(sizeof(struct ql_mpi_coredump));
4660                if (qdev->mpi_coredump == NULL) {
4661                        err = -ENOMEM;
4662                        goto err_out2;
4663                }
4664                if (qlge_force_coredump)
4665                        set_bit(QL_FRC_COREDUMP, &qdev->flags);
4666        }
4667        /* make sure the EEPROM is good */
4668        err = qdev->nic_ops->get_flash(qdev);
4669        if (err) {
4670                dev_err(&pdev->dev, "Invalid FLASH.\n");
4671                goto err_out2;
4672        }
4673
4674        /* Keep local copy of current mac address. */
4675        memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4676
4677        /* Set up the default ring sizes. */
4678        qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4679        qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4680
4681        /* Set up the coalescing parameters. */
4682        qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4683        qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4684        qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4685        qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4686
4687        /*
4688         * Set up the operating parameters.
4689         */
4690        qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
4691        INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4692        INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4693        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4694        INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4695        INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4696        INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4697        init_completion(&qdev->ide_completion);
4698        mutex_init(&qdev->mpi_mutex);
4699
4700        if (!cards_found) {
4701                dev_info(&pdev->dev, "%s\n", DRV_STRING);
4702                dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4703                         DRV_NAME, DRV_VERSION);
4704        }
4705        return 0;
4706err_out2:
4707        ql_release_all(pdev);
4708err_out1:
4709        pci_disable_device(pdev);
4710        return err;
4711}
4712
4713static const struct net_device_ops qlge_netdev_ops = {
4714        .ndo_open               = qlge_open,
4715        .ndo_stop               = qlge_close,
4716        .ndo_start_xmit         = qlge_send,
4717        .ndo_change_mtu_rh74    = qlge_change_mtu,
4718        .ndo_get_stats          = qlge_get_stats,
4719        .ndo_set_rx_mode        = qlge_set_multicast_list,
4720        .ndo_set_mac_address    = qlge_set_mac_address,
4721        .ndo_validate_addr      = eth_validate_addr,
4722        .ndo_tx_timeout         = qlge_tx_timeout,
4723        .ndo_fix_features       = qlge_fix_features,
4724        .ndo_set_features       = qlge_set_features,
4725        .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4726        .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4727};
4728
4729static void ql_timer(unsigned long data)
4730{
4731        struct ql_adapter *qdev = (struct ql_adapter *)data;
4732        u32 var = 0;
4733
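            /* Periodic status read: if the PCI channel has gone offline
             * (EEH), log it and stop rearming the timer.
             */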
4734        var = ql_read32(qdev, STS);
4735        if (pci_channel_offline(qdev->pdev)) {
4736                netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4737                return;
4738        }
4739
4740        mod_timer(&qdev->timer, jiffies + (5*HZ));
4741}
4742
4743static int qlge_probe(struct pci_dev *pdev,
4744                      const struct pci_device_id *pci_entry)
4745{
4746        struct net_device *ndev = NULL;
4747        struct ql_adapter *qdev = NULL;
4748        static int cards_found;
4749        int err = 0;
4750
4751        ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4752                        min(MAX_CPUS, netif_get_num_default_rss_queues()));
4753        if (!ndev)
4754                return -ENOMEM;
4755
4756        err = ql_init_device(pdev, ndev, cards_found);
4757        if (err < 0) {
4758                free_netdev(ndev);
4759                return err;
4760        }
4761
4762        qdev = netdev_priv(ndev);
4763        SET_NETDEV_DEV(ndev, &pdev->dev);
4764        ndev->hw_features = NETIF_F_SG |
4765                            NETIF_F_IP_CSUM |
4766                            NETIF_F_TSO |
4767                            NETIF_F_TSO_ECN |
4768                            NETIF_F_HW_VLAN_CTAG_TX |
4769                            NETIF_F_HW_VLAN_CTAG_RX |
4770                            NETIF_F_HW_VLAN_CTAG_FILTER |
4771                            NETIF_F_RXCSUM;
4772        ndev->features = ndev->hw_features;
4773        ndev->vlan_features = ndev->hw_features;
4774        /* vlan gets same features (except vlan filter) */
4775        ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4776                                 NETIF_F_HW_VLAN_CTAG_TX |
4777                                 NETIF_F_HW_VLAN_CTAG_RX);
4778
4779        if (test_bit(QL_DMA64, &qdev->flags))
4780                ndev->features |= NETIF_F_HIGHDMA;
4781
4782        /*
4783         * Set up net_device structure.
4784         */
4785        ndev->tx_queue_len = qdev->tx_ring_size;
4786        ndev->irq = pdev->irq;
4787
4788        ndev->netdev_ops = &qlge_netdev_ops;
4789        ndev->ethtool_ops = &qlge_ethtool_ops;
4790        ndev->watchdog_timeo = 10 * HZ;
4791
4792        err = register_netdev(ndev);
4793        if (err) {
4794                dev_err(&pdev->dev, "net device registration failed.\n");
4795                ql_release_all(pdev);
4796                pci_disable_device(pdev);
4797                free_netdev(ndev);
4798                return err;
4799        }
4800        /* Start up the timer to trigger EEH if
4801         * the bus goes dead
4802         */
4803        init_timer_deferrable(&qdev->timer);
4804        qdev->timer.data = (unsigned long)qdev;
4805        qdev->timer.function = ql_timer;
4806        qdev->timer.expires = jiffies + (5*HZ);
4807        add_timer(&qdev->timer);
4808        ql_link_off(qdev);
4809        ql_display_dev_info(ndev);
4810        atomic_set(&qdev->lb_count, 0);
4811        cards_found++;
4812        return 0;
4813}
4814
4815netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4816{
4817        return qlge_send(skb, ndev);
4818}
4819
4820int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4821{
4822        return ql_clean_inbound_rx_ring(rx_ring, budget);
4823}
4824
4825static void qlge_remove(struct pci_dev *pdev)
4826{
4827        struct net_device *ndev = pci_get_drvdata(pdev);
4828        struct ql_adapter *qdev = netdev_priv(ndev);
4829        del_timer_sync(&qdev->timer);
4830        ql_cancel_all_work_sync(qdev);
4831        unregister_netdev(ndev);
4832        ql_release_all(pdev);
4833        pci_disable_device(pdev);
4834        free_netdev(ndev);
4835}
4836
4837/* Clean up resources without touching hardware. */
4838static void ql_eeh_close(struct net_device *ndev)
4839{
4840        int i;
4841        struct ql_adapter *qdev = netdev_priv(ndev);
4842
4843        if (netif_carrier_ok(ndev)) {
4844                netif_carrier_off(ndev);
4845                netif_stop_queue(ndev);
4846        }
4847
4848        /* The timer is stopped by the caller; cancel outstanding work. */
4849        ql_cancel_all_work_sync(qdev);
4850
4851        for (i = 0; i < qdev->rss_ring_count; i++)
4852                netif_napi_del(&qdev->rx_ring[i].napi);
4853
4854        clear_bit(QL_ADAPTER_UP, &qdev->flags);
4855        ql_tx_ring_clean(qdev);
4856        ql_free_rx_buffers(qdev);
4857        ql_release_adapter_resources(qdev);
4858}
4859
4860/*
4861 * This callback is called by the PCI subsystem whenever
4862 * a PCI bus error is detected.
4863 */
4864static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4865                                               enum pci_channel_state state)
4866{
4867        struct net_device *ndev = pci_get_drvdata(pdev);
4868        struct ql_adapter *qdev = netdev_priv(ndev);
4869
4870        switch (state) {
4871        case pci_channel_io_normal:
4872                return PCI_ERS_RESULT_CAN_RECOVER;
4873        case pci_channel_io_frozen:
4874                netif_device_detach(ndev);
4875                del_timer_sync(&qdev->timer);
4876                if (netif_running(ndev))
4877                        ql_eeh_close(ndev);
4878                pci_disable_device(pdev);
4879                return PCI_ERS_RESULT_NEED_RESET;
4880        case pci_channel_io_perm_failure:
4881                dev_err(&pdev->dev,
4882                        "%s: pci_channel_io_perm_failure.\n", __func__);
4883                del_timer_sync(&qdev->timer);
4884                ql_eeh_close(ndev);
4885                set_bit(QL_EEH_FATAL, &qdev->flags);
4886                return PCI_ERS_RESULT_DISCONNECT;
4887        }
4888
4889        /* Request a slot reset. */
4890        return PCI_ERS_RESULT_NEED_RESET;
4891}
4892
4893/*
4894 * This callback is called after the PCI bus has been reset.
4895 * Basically, this tries to restart the card from scratch.
4896 * This is a shortened version of the device probe/discovery code,
4897 * it resembles the first half of the qlge_probe() routine.
4898 */
4899static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4900{
4901        struct net_device *ndev = pci_get_drvdata(pdev);
4902        struct ql_adapter *qdev = netdev_priv(ndev);
4903
4904        pdev->error_state = pci_channel_io_normal;
4905
4906        pci_restore_state(pdev);
4907        if (pci_enable_device(pdev)) {
4908                netif_err(qdev, ifup, qdev->ndev,
4909                          "Cannot re-enable PCI device after reset.\n");
4910                return PCI_ERS_RESULT_DISCONNECT;
4911        }
4912        pci_set_master(pdev);
4913
4914        if (ql_adapter_reset(qdev)) {
4915                netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4916                set_bit(QL_EEH_FATAL, &qdev->flags);
4917                return PCI_ERS_RESULT_DISCONNECT;
4918        }
4919
4920        return PCI_ERS_RESULT_RECOVERED;
4921}
4922
4923static void qlge_io_resume(struct pci_dev *pdev)
4924{
4925        struct net_device *ndev = pci_get_drvdata(pdev);
4926        struct ql_adapter *qdev = netdev_priv(ndev);
4927        int err = 0;
4928
4929        if (netif_running(ndev)) {
4930                err = qlge_open(ndev);
4931                if (err) {
4932                        netif_err(qdev, ifup, qdev->ndev,
4933                                  "Device initialization failed after reset.\n");
4934                        return;
4935                }
4936        } else {
4937                netif_err(qdev, ifup, qdev->ndev,
4938                          "Device was not running prior to EEH.\n");
4939        }
4940        mod_timer(&qdev->timer, jiffies + (5*HZ));
4941        netif_device_attach(ndev);
4942}
4943
4944static const struct pci_error_handlers qlge_err_handler = {
4945        .error_detected = qlge_io_error_detected,
4946        .slot_reset = qlge_io_slot_reset,
4947        .resume = qlge_io_resume,
4948};
4949
4950static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4951{
4952        struct net_device *ndev = pci_get_drvdata(pdev);
4953        struct ql_adapter *qdev = netdev_priv(ndev);
4954        int err;
4955
4956        netif_device_detach(ndev);
4957        del_timer_sync(&qdev->timer);
4958
4959        if (netif_running(ndev)) {
4960                err = ql_adapter_down(qdev);
4961                if (err)
4962                        return err;
4963        }
4964
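            /* Apply the configured wake-on-LAN settings before powering
             * the device down.
             */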
4965        ql_wol(qdev);
4966        err = pci_save_state(pdev);
4967        if (err)
4968                return err;
4969
4970        pci_disable_device(pdev);
4971
4972        pci_set_power_state(pdev, pci_choose_state(pdev, state));
4973
4974        return 0;
4975}
4976
4977#ifdef CONFIG_PM
4978static int qlge_resume(struct pci_dev *pdev)
4979{
4980        struct net_device *ndev = pci_get_drvdata(pdev);
4981        struct ql_adapter *qdev = netdev_priv(ndev);
4982        int err;
4983
4984        pci_set_power_state(pdev, PCI_D0);
4985        pci_restore_state(pdev);
4986        err = pci_enable_device(pdev);
4987        if (err) {
4988                netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4989                return err;
4990        }
4991        pci_set_master(pdev);
4992
4993        pci_enable_wake(pdev, PCI_D3hot, 0);
4994        pci_enable_wake(pdev, PCI_D3cold, 0);
4995
4996        if (netif_running(ndev)) {
4997                err = ql_adapter_up(qdev);
4998                if (err)
4999                        return err;
5000        }
5001
5002        mod_timer(&qdev->timer, jiffies + (5*HZ));
5003        netif_device_attach(ndev);
5004
5005        return 0;
5006}
5007#endif /* CONFIG_PM */
5008
5009static void qlge_shutdown(struct pci_dev *pdev)
5010{
5011        qlge_suspend(pdev, PMSG_SUSPEND);
5012}
5013
5014static struct pci_driver qlge_driver = {
5015        .name = DRV_NAME,
5016        .id_table = qlge_pci_tbl,
5017        .probe = qlge_probe,
5018        .remove = qlge_remove,
5019#ifdef CONFIG_PM
5020        .suspend = qlge_suspend,
5021        .resume = qlge_resume,
5022#endif
5023        .shutdown = qlge_shutdown,
5024        .err_handler = &qlge_err_handler
5025};
5026
5027static int __init qlge_init_module(void)
5028{
5029        return pci_register_driver(&qlge_driver);
5030}
5031
5032static void __exit qlge_exit(void)
5033{
5034        pci_unregister_driver(&qlge_driver);
5035}
5036
5037module_init(qlge_init_module);
5038module_exit(qlge_exit);
5039