linux/drivers/net/ethernet/qlogic/qlge/qlge_main.c
   1/*
   2 * QLogic qlge NIC HBA Driver
   3 * Copyright (c)  2003-2008 QLogic Corporation
   4 * See LICENSE.qlge for copyright and licensing details.
   5 * Author:     Linux qlge network device driver by
   6 *                      Ron Mercer <ron.mercer@qlogic.com>
   7 */
   8#include <linux/kernel.h>
   9#include <linux/bitops.h>
  10#include <linux/types.h>
  11#include <linux/module.h>
  12#include <linux/list.h>
  13#include <linux/pci.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/pagemap.h>
  16#include <linux/sched.h>
  17#include <linux/slab.h>
  18#include <linux/dmapool.h>
  19#include <linux/mempool.h>
  20#include <linux/spinlock.h>
  21#include <linux/kthread.h>
  22#include <linux/interrupt.h>
  23#include <linux/errno.h>
  24#include <linux/ioport.h>
  25#include <linux/in.h>
  26#include <linux/ip.h>
  27#include <linux/ipv6.h>
  28#include <net/ipv6.h>
  29#include <linux/tcp.h>
  30#include <linux/udp.h>
  31#include <linux/if_arp.h>
  32#include <linux/if_ether.h>
  33#include <linux/netdevice.h>
  34#include <linux/etherdevice.h>
  35#include <linux/ethtool.h>
  36#include <linux/if_vlan.h>
  37#include <linux/skbuff.h>
  38#include <linux/delay.h>
  39#include <linux/mm.h>
  40#include <linux/vmalloc.h>
  41#include <linux/prefetch.h>
  42#include <net/ip6_checksum.h>
  43
  44#include "qlge.h"
  45
  46char qlge_driver_name[] = DRV_NAME;
  47const char qlge_driver_version[] = DRV_VERSION;
  48
  49MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
  50MODULE_DESCRIPTION(DRV_STRING " ");
  51MODULE_LICENSE("GPL");
  52MODULE_VERSION(DRV_VERSION);
  53
  54static const u32 default_msg =
  55    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
  56/* NETIF_MSG_TIMER |    */
  57    NETIF_MSG_IFDOWN |
  58    NETIF_MSG_IFUP |
  59    NETIF_MSG_RX_ERR |
  60    NETIF_MSG_TX_ERR |
  61/*  NETIF_MSG_TX_QUEUED | */
  62/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
  63/* NETIF_MSG_PKTDATA | */
  64    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
  65
  66static int debug = -1;  /* defaults above */
  67module_param(debug, int, 0664);
  68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  69
  70#define MSIX_IRQ 0
  71#define MSI_IRQ 1
  72#define LEG_IRQ 2
  73static int qlge_irq_type = MSIX_IRQ;
  74module_param(qlge_irq_type, int, 0664);
  75MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
  76
  77static int qlge_mpi_coredump;
  78module_param(qlge_mpi_coredump, int, 0);
  79MODULE_PARM_DESC(qlge_mpi_coredump,
  80                "Option to enable MPI firmware dump. "
  81                "Default is OFF - Do not allocate memory.");
  82
  83static int qlge_force_coredump;
  84module_param(qlge_force_coredump, int, 0);
  85MODULE_PARM_DESC(qlge_force_coredump,
  86                "Option to allow force of firmware core dump. "
  87                "Default is OFF - Do not allow.");
  88
  89static const struct pci_device_id qlge_pci_tbl[] = {
  90        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
  91        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
  92        /* required last entry */
  93        {0,}
  94};
  95
  96MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
  97
  98static int ql_wol(struct ql_adapter *);
  99static void qlge_set_multicast_list(struct net_device *);
 100static int ql_adapter_down(struct ql_adapter *);
 101static int ql_adapter_up(struct ql_adapter *);
 102
 103/* This hardware semaphore provides exclusive access to
 104 * resources shared between the NIC driver, MPI firmware,
 105 * FCOE firmware and the FC driver.
 106 */
 107static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
 108{
 109        u32 sem_bits = 0;
 110
 111        switch (sem_mask) {
 112        case SEM_XGMAC0_MASK:
 113                sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
 114                break;
 115        case SEM_XGMAC1_MASK:
 116                sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
 117                break;
 118        case SEM_ICB_MASK:
 119                sem_bits = SEM_SET << SEM_ICB_SHIFT;
 120                break;
 121        case SEM_MAC_ADDR_MASK:
 122                sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
 123                break;
 124        case SEM_FLASH_MASK:
 125                sem_bits = SEM_SET << SEM_FLASH_SHIFT;
 126                break;
 127        case SEM_PROBE_MASK:
 128                sem_bits = SEM_SET << SEM_PROBE_SHIFT;
 129                break;
 130        case SEM_RT_IDX_MASK:
 131                sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
 132                break;
 133        case SEM_PROC_REG_MASK:
 134                sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
 135                break;
 136        default:
 137                netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
 138                return -EINVAL;
 139        }
 140
 141        ql_write32(qdev, SEM, sem_bits | sem_mask);
 142        return !(ql_read32(qdev, SEM) & sem_bits);
 143}
 144
 145int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
 146{
 147        unsigned int wait_count = 30;
 148        do {
 149                if (!ql_sem_trylock(qdev, sem_mask))
 150                        return 0;
 151                udelay(100);
 152        } while (--wait_count);
 153        return -ETIMEDOUT;
 154}
 155
 156void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
 157{
 158        ql_write32(qdev, SEM, sem_mask);
 159        ql_read32(qdev, SEM);   /* flush */
 160}
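
/* Illustrative sketch (not part of the original driver): the usual
 * acquire/use/release pattern for the hardware semaphore helpers above,
 * mirroring how ql_set_mac_addr() later in this file brackets its CAM
 * update.  The function name is hypothetical.
 */
static int ql_example_sem_usage(struct ql_adapter *qdev)
{
	int status;

	/* Spin (up to ~3 ms) until the MAC address semaphore is ours. */
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;	/* -ETIMEDOUT: another function owns it. */

	/* ... touch MAC_ADDR_IDX/MAC_ADDR_DATA here ... */

	/* Always release, even on error paths inside the bracket. */
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}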
 161
 162/* This function waits for a specific bit to come ready
 163 * in a given register.  It is used mostly by the initialization
 164 * process, but is also used by kernel thread APIs such as
 165 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 166 */
 167int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
 168{
 169        u32 temp;
 170        int count = UDELAY_COUNT;
 171
 172        while (count) {
 173                temp = ql_read32(qdev, reg);
 174
 175                /* check for errors */
 176                if (temp & err_bit) {
 177                        netif_alert(qdev, probe, qdev->ndev,
 178                                    "register 0x%.08x access error, value = 0x%.08x!\n",
 179                                    reg, temp);
 180                        return -EIO;
 181                } else if (temp & bit)
 182                        return 0;
 183                udelay(UDELAY_DELAY);
 184                count--;
 185        }
 186        netif_alert(qdev, probe, qdev->ndev,
 187                    "Timed out waiting for reg %x to come ready.\n", reg);
 188        return -ETIMEDOUT;
 189}
 190
 191/* The CFG register is used to download TX and RX control blocks
 192 * to the chip. This function waits for an operation to complete.
 193 */
 194static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
 195{
 196        int count = UDELAY_COUNT;
 197        u32 temp;
 198
 199        while (count) {
 200                temp = ql_read32(qdev, CFG);
 201                if (temp & CFG_LE)
 202                        return -EIO;
 203                if (!(temp & bit))
 204                        return 0;
 205                udelay(UDELAY_DELAY);
 206                count--;
 207        }
 208        return -ETIMEDOUT;
 209}
 210
 211
 212/* Used to issue init control blocks to hw. Maps control block,
 213 * sets address, triggers download, waits for completion.
 214 */
 215int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 216                 u16 q_id)
 217{
 218        u64 map;
 219        int status = 0;
 220        int direction;
 221        u32 mask;
 222        u32 value;
 223
 224        direction =
 225            (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
 226            PCI_DMA_FROMDEVICE;
 227
 228        map = pci_map_single(qdev->pdev, ptr, size, direction);
 229        if (pci_dma_mapping_error(qdev->pdev, map)) {
 230                netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
 231                return -ENOMEM;
 232        }
 233
 234        status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
 235        if (status)
 236                return status;
 237
 238        status = ql_wait_cfg(qdev, bit);
 239        if (status) {
 240                netif_err(qdev, ifup, qdev->ndev,
 241                          "Timed out waiting for CFG to come ready.\n");
 242                goto exit;
 243        }
 244
 245        ql_write32(qdev, ICB_L, (u32) map);
 246        ql_write32(qdev, ICB_H, (u32) (map >> 32));
 247
 248        mask = CFG_Q_MASK | (bit << 16);
 249        value = bit | (q_id << CFG_Q_SHIFT);
 250        ql_write32(qdev, CFG, (mask | value));
 251
 252        /*
 253         * Wait for the bit to clear after signaling hw.
 254         */
 255        status = ql_wait_cfg(qdev, bit);
 256exit:
 257        ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
 258        pci_unmap_single(qdev->pdev, map, size, direction);
 259        return status;
 260}
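
/* Illustrative sketch (not part of the original driver): how a caller
 * might hand an init control block to ql_write_cfg().  The cqicb pointer,
 * its size and the queue id are assumptions for the example; the CFG_LCQ
 * load bit is the one referenced inside ql_write_cfg() above.
 */
static int ql_example_load_cq_icb(struct ql_adapter *qdev, void *cqicb,
				  int size, u16 cq_id)
{
	/* ql_write_cfg() maps the block, grabs SEM_ICB_MASK, points
	 * ICB_L/ICB_H at it and waits for the load bit to clear.
	 */
	return ql_write_cfg(qdev, cqicb, size, CFG_LCQ, cq_id);
}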
 261
 262/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
 263int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 264                        u32 *value)
 265{
 266        u32 offset = 0;
 267        int status;
 268
 269        switch (type) {
 270        case MAC_ADDR_TYPE_MULTI_MAC:
 271        case MAC_ADDR_TYPE_CAM_MAC:
 272                {
 273                        status =
 274                            ql_wait_reg_rdy(qdev,
 275                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 276                        if (status)
 277                                goto exit;
 278                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 279                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 280                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 281                        status =
 282                            ql_wait_reg_rdy(qdev,
 283                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 284                        if (status)
 285                                goto exit;
 286                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 287                        status =
 288                            ql_wait_reg_rdy(qdev,
 289                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 290                        if (status)
 291                                goto exit;
 292                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 293                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 294                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 295                        status =
 296                            ql_wait_reg_rdy(qdev,
 297                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 298                        if (status)
 299                                goto exit;
 300                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 301                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
 302                                status =
 303                                    ql_wait_reg_rdy(qdev,
 304                                        MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 305                                if (status)
 306                                        goto exit;
 307                                ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 308                                           (index << MAC_ADDR_IDX_SHIFT) | /* index */
 309                                           MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 310                                status =
 311                                    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
 312                                                    MAC_ADDR_MR, 0);
 313                                if (status)
 314                                        goto exit;
 315                                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 316                        }
 317                        break;
 318                }
 319        case MAC_ADDR_TYPE_VLAN:
 320        case MAC_ADDR_TYPE_MULTI_FLTR:
 321        default:
 322                netif_crit(qdev, ifup, qdev->ndev,
 323                           "Address type %d not yet supported.\n", type);
 324                status = -EPERM;
 325        }
 326exit:
 327        return status;
 328}
 329
 330/* Set up a MAC, multicast or VLAN address for the
 331 * inbound frame matching.
 332 */
 333static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 334                               u16 index)
 335{
 336        u32 offset = 0;
 337        int status = 0;
 338
 339        switch (type) {
 340        case MAC_ADDR_TYPE_MULTI_MAC:
 341                {
 342                        u32 upper = (addr[0] << 8) | addr[1];
 343                        u32 lower = (addr[2] << 24) | (addr[3] << 16) |
 344                                        (addr[4] << 8) | (addr[5]);
 345
 346                        status =
 347                                ql_wait_reg_rdy(qdev,
 348                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 349                        if (status)
 350                                goto exit;
 351                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
 352                                (index << MAC_ADDR_IDX_SHIFT) |
 353                                type | MAC_ADDR_E);
 354                        ql_write32(qdev, MAC_ADDR_DATA, lower);
 355                        status =
 356                                ql_wait_reg_rdy(qdev,
 357                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 358                        if (status)
 359                                goto exit;
 360                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
 361                                (index << MAC_ADDR_IDX_SHIFT) |
 362                                type | MAC_ADDR_E);
 363
 364                        ql_write32(qdev, MAC_ADDR_DATA, upper);
 365                        status =
 366                                ql_wait_reg_rdy(qdev,
 367                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 368                        if (status)
 369                                goto exit;
 370                        break;
 371                }
 372        case MAC_ADDR_TYPE_CAM_MAC:
 373                {
 374                        u32 cam_output;
 375                        u32 upper = (addr[0] << 8) | addr[1];
 376                        u32 lower =
 377                            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
 378                            (addr[5]);
 379                        status =
 380                            ql_wait_reg_rdy(qdev,
 381                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 382                        if (status)
 383                                goto exit;
 384                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 385                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 386                                   type);       /* type */
 387                        ql_write32(qdev, MAC_ADDR_DATA, lower);
 388                        status =
 389                            ql_wait_reg_rdy(qdev,
 390                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 391                        if (status)
 392                                goto exit;
 393                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 394                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 395                                   type);       /* type */
 396                        ql_write32(qdev, MAC_ADDR_DATA, upper);
 397                        status =
 398                            ql_wait_reg_rdy(qdev,
 399                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 400                        if (status)
 401                                goto exit;
 402                        ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
 403                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
 404                                   type);       /* type */
 405                        /* This field should also include the queue id
 406                           and possibly the function id.  Right now we hardcode
 407                           the route field to NIC core.
 408                         */
 409                        cam_output = (CAM_OUT_ROUTE_NIC |
 410                                      (qdev->
 411                                       func << CAM_OUT_FUNC_SHIFT) |
 412                                        (0 << CAM_OUT_CQ_ID_SHIFT));
 413                        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
 414                                cam_output |= CAM_OUT_RV;
 415                        /* route to NIC core */
 416                        ql_write32(qdev, MAC_ADDR_DATA, cam_output);
 417                        break;
 418                }
 419        case MAC_ADDR_TYPE_VLAN:
 420                {
 421                        u32 enable_bit = *((u32 *) &addr[0]);
 422                        /* For VLAN, the addr actually holds a bit that
 423                         * either enables or disables the vlan id we are
 424                         * addressing. It's either MAC_ADDR_E on or off.
 425                         * That's bit-27 we're talking about.
 426                         */
 427                        status =
 428                            ql_wait_reg_rdy(qdev,
 429                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 430                        if (status)
 431                                goto exit;
 432                        ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
 433                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
 434                                   type |       /* type */
 435                                   enable_bit); /* enable/disable */
 436                        break;
 437                }
 438        case MAC_ADDR_TYPE_MULTI_FLTR:
 439        default:
 440                netif_crit(qdev, ifup, qdev->ndev,
 441                           "Address type %d not yet supported.\n", type);
 442                status = -EPERM;
 443        }
 444exit:
 445        return status;
 446}
 447
 448/* Set or clear MAC address in hardware. We sometimes
 449 * have to clear it to prevent wrong frame routing
 450 * especially in a bonding environment.
 451 */
 452static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
 453{
 454        int status;
 455        char zero_mac_addr[ETH_ALEN];
 456        char *addr;
 457
 458        if (set) {
 459                addr = &qdev->current_mac_addr[0];
 460                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 461                             "Set Mac addr %pM\n", addr);
 462        } else {
 463                eth_zero_addr(zero_mac_addr);
 464                addr = &zero_mac_addr[0];
 465                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 466                             "Clearing MAC address\n");
 467        }
 468        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 469        if (status)
 470                return status;
 471        status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
 472                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
 473        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 474        if (status)
 475                netif_err(qdev, ifup, qdev->ndev,
 476                          "Failed to init mac address.\n");
 477        return status;
 478}
 479
 480void ql_link_on(struct ql_adapter *qdev)
 481{
 482        netif_err(qdev, link, qdev->ndev, "Link is up.\n");
 483        netif_carrier_on(qdev->ndev);
 484        ql_set_mac_addr(qdev, 1);
 485}
 486
 487void ql_link_off(struct ql_adapter *qdev)
 488{
 489        netif_err(qdev, link, qdev->ndev, "Link is down.\n");
 490        netif_carrier_off(qdev->ndev);
 491        ql_set_mac_addr(qdev, 0);
 492}
 493
 494/* Get a specific frame routing value from the CAM.
 495 * Used for debug and reg dump.
 496 */
 497int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
 498{
 499        int status = 0;
 500
 501        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 502        if (status)
 503                goto exit;
 504
 505        ql_write32(qdev, RT_IDX,
 506                   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
 507        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
 508        if (status)
 509                goto exit;
 510        *value = ql_read32(qdev, RT_DATA);
 511exit:
 512        return status;
 513}
 514
 515/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 516 * to route different frame types to various inbound queues.  We send broadcast/
 517 * multicast/error frames to the default queue for slow handling,
 518 * and CAM hit/RSS frames to the fast handling queues.
 519 */
 520static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
 521                              int enable)
 522{
 523        int status = -EINVAL; /* Return error if no mask match. */
 524        u32 value = 0;
 525
 526        switch (mask) {
 527        case RT_IDX_CAM_HIT:
 528                {
 529                        value = RT_IDX_DST_CAM_Q |      /* dest */
 530                            RT_IDX_TYPE_NICQ |  /* type */
 531                            (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
 532                        break;
 533                }
 534        case RT_IDX_VALID:      /* Promiscuous Mode frames. */
 535                {
 536                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 537                            RT_IDX_TYPE_NICQ |  /* type */
 538                            (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
 539                        break;
 540                }
 541        case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
 542                {
 543                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 544                            RT_IDX_TYPE_NICQ |  /* type */
 545                            (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
 546                        break;
 547                }
 548        case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
 549                {
 550                        value = RT_IDX_DST_DFLT_Q | /* dest */
 551                                RT_IDX_TYPE_NICQ | /* type */
 552                                (RT_IDX_IP_CSUM_ERR_SLOT <<
 553                                RT_IDX_IDX_SHIFT); /* index */
 554                        break;
 555                }
 556        case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
 557                {
 558                        value = RT_IDX_DST_DFLT_Q | /* dest */
 559                                RT_IDX_TYPE_NICQ | /* type */
 560                                (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
 561                                RT_IDX_IDX_SHIFT); /* index */
 562                        break;
 563                }
 564        case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
 565                {
 566                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 567                            RT_IDX_TYPE_NICQ |  /* type */
 568                            (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
 569                        break;
 570                }
 571        case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
 572                {
 573                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 574                            RT_IDX_TYPE_NICQ |  /* type */
 575                            (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
 576                        break;
 577                }
 578        case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
 579                {
 580                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 581                            RT_IDX_TYPE_NICQ |  /* type */
 582                            (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 583                        break;
 584                }
 585        case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
 586                {
 587                        value = RT_IDX_DST_RSS |        /* dest */
 588                            RT_IDX_TYPE_NICQ |  /* type */
 589                            (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 590                        break;
 591                }
 592        case 0:         /* Clear the E-bit on an entry. */
 593                {
 594                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 595                            RT_IDX_TYPE_NICQ |  /* type */
 596                            (index << RT_IDX_IDX_SHIFT);/* index */
 597                        break;
 598                }
 599        default:
 600                netif_err(qdev, ifup, qdev->ndev,
 601                          "Mask type %d not yet supported.\n", mask);
 602                status = -EPERM;
 603                goto exit;
 604        }
 605
 606        if (value) {
 607                status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 608                if (status)
 609                        goto exit;
 610                value |= (enable ? RT_IDX_E : 0);
 611                ql_write32(qdev, RT_IDX, value);
 612                ql_write32(qdev, RT_DATA, enable ? mask : 0);
 613        }
 614exit:
 615        return status;
 616}
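
/* Illustrative sketch (not part of the original driver): enabling and
 * later clearing one routing slot with ql_set_routing_reg().  The
 * broadcast slot/mask pair is taken from the switch above; whether the
 * caller holds SEM_RT_IDX_MASK around this is an assumption here.
 */
static int ql_example_route_bcast(struct ql_adapter *qdev, int enable)
{
	/* enable = 1 routes broadcast frames to the default queue;
	 * enable = 0 clears the E-bit for that slot again.
	 */
	return ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST,
				  enable);
}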
 617
 618static void ql_enable_interrupts(struct ql_adapter *qdev)
 619{
 620        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
 621}
 622
 623static void ql_disable_interrupts(struct ql_adapter *qdev)
 624{
 625        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
 626}
 627
 628/* If we're running with multiple MSI-X vectors then we enable on the fly.
 629 * Otherwise, we may have multiple outstanding workers and don't want to
 630 * enable until the last one finishes. In this case, the irq_cnt gets
 631 * incremented every time we queue a worker and decremented every time
 632 * a worker finishes.  Once it hits zero we enable the interrupt.
 633 */
 634u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 635{
 636        u32 var = 0;
 637        unsigned long hw_flags = 0;
 638        struct intr_context *ctx = qdev->intr_context + intr;
 639
 640        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
 641                /* Always enable if we're MSIX multi interrupts and
 642                 * it's not the default (zeroeth) interrupt.
 643                 */
 644                ql_write32(qdev, INTR_EN,
 645                           ctx->intr_en_mask);
 646                var = ql_read32(qdev, STS);
 647                return var;
 648        }
 649
 650        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 651        if (atomic_dec_and_test(&ctx->irq_cnt)) {
 652                ql_write32(qdev, INTR_EN,
 653                           ctx->intr_en_mask);
 654                var = ql_read32(qdev, STS);
 655        }
 656        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 657        return var;
 658}
 659
 660static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 661{
 662        u32 var = 0;
 663        struct intr_context *ctx;
 664
 665        /* HW disables for us if we're MSIX multi interrupts and
 666         * it's not the default (zeroeth) interrupt.
 667         */
 668        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
 669                return 0;
 670
 671        ctx = qdev->intr_context + intr;
 672        spin_lock(&qdev->hw_lock);
 673        if (!atomic_read(&ctx->irq_cnt)) {
 674                ql_write32(qdev, INTR_EN,
 675                ctx->intr_dis_mask);
 676                var = ql_read32(qdev, STS);
 677        }
 678        atomic_inc(&ctx->irq_cnt);
 679        spin_unlock(&qdev->hw_lock);
 680        return var;
 681}
 682
 683static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 684{
 685        int i;
 686        for (i = 0; i < qdev->intr_count; i++) {
 687                /* The enable call does an atomic_dec_and_test
 688                 * and enables only if the result is zero.
 689                 * So we precharge it here.
 690                 */
 691                if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
 692                        i == 0))
 693                        atomic_set(&qdev->intr_context[i].irq_cnt, 1);
 694                ql_enable_completion_interrupt(qdev, i);
 695        }
 696
 697}
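
/* Illustrative sketch (not part of the original driver): the
 * disable/enable bracket implied by the irq_cnt scheme above.  For the
 * default (zeroeth) vector, or when MSI-X is not enabled, each disable
 * increments irq_cnt and the matching enable only re-arms the interrupt
 * once the count drops back to zero.
 */
static void ql_example_intr_bracket(struct ql_adapter *qdev, u32 intr)
{
	ql_disable_completion_interrupt(qdev, intr);	/* irq_cnt++ */

	/* ... service the completion queue here ... */

	ql_enable_completion_interrupt(qdev, intr);	/* re-arms at zero */
}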
 698
 699static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
 700{
 701        int status, i;
 702        u16 csum = 0;
 703        __le16 *flash = (__le16 *)&qdev->flash;
 704
 705        status = strncmp((char *)&qdev->flash, str, 4);
 706        if (status) {
 707                netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
 708                return  status;
 709        }
 710
 711        for (i = 0; i < size; i++)
 712                csum += le16_to_cpu(*flash++);
 713
 714        if (csum)
 715                netif_err(qdev, ifup, qdev->ndev,
 716                          "Invalid flash checksum, csum = 0x%.04x.\n", csum);
 717
 718        return csum;
 719}
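
/* Illustrative note (not from the original source): ql_validate_flash()
 * treats the image as valid when its 16-bit words sum to zero modulo
 * 2^16, presumably because the image stores a compensating checksum
 * word.  A stand-alone check over a raw buffer would look like this
 * (buf/words and the function name are assumptions for the example):
 */
static bool ql_example_flash_csum_ok(const __le16 *buf, int words)
{
	u16 csum = 0;
	int i;

	for (i = 0; i < words; i++)
		csum += le16_to_cpu(buf[i]);

	return csum == 0;	/* same criterion ql_validate_flash() uses */
}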
 720
 721static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
 722{
 723        int status = 0;
 724        /* wait for reg to come ready */
 725        status = ql_wait_reg_rdy(qdev,
 726                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 727        if (status)
 728                goto exit;
 729        /* set up for reg read */
 730        ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
 731        /* wait for reg to come ready */
 732        status = ql_wait_reg_rdy(qdev,
 733                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 734        if (status)
 735                goto exit;
 736        /* This data is stored on flash as an array of
 737         * __le32.  Since ql_read32() returns cpu endian
 738         * we need to swap it back.
 739         */
 740        *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
 741exit:
 742        return status;
 743}
 744
 745static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 746{
 747        u32 i, size;
 748        int status;
 749        __le32 *p = (__le32 *)&qdev->flash;
 750        u32 offset;
 751        u8 mac_addr[6];
 752
 753        /* Get flash offset for function and adjust
 754         * for dword access.
 755         */
 756        if (!qdev->port)
 757                offset = FUNC0_FLASH_OFFSET / sizeof(u32);
 758        else
 759                offset = FUNC1_FLASH_OFFSET / sizeof(u32);
 760
 761        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 762                return -ETIMEDOUT;
 763
 764        size = sizeof(struct flash_params_8000) / sizeof(u32);
 765        for (i = 0; i < size; i++, p++) {
 766                status = ql_read_flash_word(qdev, i+offset, p);
 767                if (status) {
 768                        netif_err(qdev, ifup, qdev->ndev,
 769                                  "Error reading flash.\n");
 770                        goto exit;
 771                }
 772        }
 773
 774        status = ql_validate_flash(qdev,
 775                        sizeof(struct flash_params_8000) / sizeof(u16),
 776                        "8000");
 777        if (status) {
 778                netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 779                status = -EINVAL;
 780                goto exit;
 781        }
 782
 783        /* Extract either manufacturer or BOFM modified
 784         * MAC address.
 785         */
 786        if (qdev->flash.flash_params_8000.data_type1 == 2)
 787                memcpy(mac_addr,
 788                        qdev->flash.flash_params_8000.mac_addr1,
 789                        qdev->ndev->addr_len);
 790        else
 791                memcpy(mac_addr,
 792                        qdev->flash.flash_params_8000.mac_addr,
 793                        qdev->ndev->addr_len);
 794
 795        if (!is_valid_ether_addr(mac_addr)) {
 796                netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
 797                status = -EINVAL;
 798                goto exit;
 799        }
 800
 801        memcpy(qdev->ndev->dev_addr,
 802                mac_addr,
 803                qdev->ndev->addr_len);
 804
 805exit:
 806        ql_sem_unlock(qdev, SEM_FLASH_MASK);
 807        return status;
 808}
 809
 810static int ql_get_8012_flash_params(struct ql_adapter *qdev)
 811{
 812        int i;
 813        int status;
 814        __le32 *p = (__le32 *)&qdev->flash;
 815        u32 offset = 0;
 816        u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
 817
 818        /* Second function's parameters follow the first
 819         * function's.
 820         */
 821        if (qdev->port)
 822                offset = size;
 823
 824        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 825                return -ETIMEDOUT;
 826
 827        for (i = 0; i < size; i++, p++) {
 828                status = ql_read_flash_word(qdev, i+offset, p);
 829                if (status) {
 830                        netif_err(qdev, ifup, qdev->ndev,
 831                                  "Error reading flash.\n");
 832                        goto exit;
 833                }
 834
 835        }
 836
 837        status = ql_validate_flash(qdev,
 838                        sizeof(struct flash_params_8012) / sizeof(u16),
 839                        "8012");
 840        if (status) {
 841                netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 842                status = -EINVAL;
 843                goto exit;
 844        }
 845
 846        if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
 847                status = -EINVAL;
 848                goto exit;
 849        }
 850
 851        memcpy(qdev->ndev->dev_addr,
 852                qdev->flash.flash_params_8012.mac_addr,
 853                qdev->ndev->addr_len);
 854
 855exit:
 856        ql_sem_unlock(qdev, SEM_FLASH_MASK);
 857        return status;
 858}
 859
 860/* xgmac registers are located behind the xgmac_addr and xgmac_data
 861 * register pair.  Each read/write requires us to wait for the ready
 862 * bit before reading/writing the data.
 863 */
 864static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
 865{
 866        int status;
 867        /* wait for reg to come ready */
 868        status = ql_wait_reg_rdy(qdev,
 869                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 870        if (status)
 871                return status;
 872        /* write the data to the data reg */
 873        ql_write32(qdev, XGMAC_DATA, data);
 874        /* trigger the write */
 875        ql_write32(qdev, XGMAC_ADDR, reg);
 876        return status;
 877}
 878
 879/* xgmac registers are located behind the xgmac_addr and xgmac_data
 880 * register pair.  Each read/write requires us to wait for the ready
 881 * bit before reading/writing the data.
 882 */
 883int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
 884{
 885        int status = 0;
 886        /* wait for reg to come ready */
 887        status = ql_wait_reg_rdy(qdev,
 888                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 889        if (status)
 890                goto exit;
 891        /* set up for reg read */
 892        ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
 893        /* wait for reg to come ready */
 894        status = ql_wait_reg_rdy(qdev,
 895                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 896        if (status)
 897                goto exit;
 898        /* get the data */
 899        *data = ql_read32(qdev, XGMAC_DATA);
 900exit:
 901        return status;
 902}
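
/* Illustrative sketch (not part of the original driver): a
 * read-modify-write through the xgmac_addr/xgmac_data window using the
 * two helpers above.  ql_8012_port_initialize() below follows this same
 * pattern on GLOBAL_CFG; the register/bit arguments here are generic.
 */
static int ql_example_xgmac_rmw(struct ql_adapter *qdev, u32 reg, u32 set_bits)
{
	u32 data;
	int status;

	status = ql_read_xgmac_reg(qdev, reg, &data);	/* wait + read */
	if (status)
		return status;
	data |= set_bits;
	return ql_write_xgmac_reg(qdev, reg, data);	/* wait + write */
}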
 903
 904/* This is used for reading the 64-bit statistics regs. */
 905int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
 906{
 907        int status = 0;
 908        u32 hi = 0;
 909        u32 lo = 0;
 910
 911        status = ql_read_xgmac_reg(qdev, reg, &lo);
 912        if (status)
 913                goto exit;
 914
 915        status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
 916        if (status)
 917                goto exit;
 918
 919        *data = (u64) lo | ((u64) hi << 32);
 920
 921exit:
 922        return status;
 923}
 924
 925static int ql_8000_port_initialize(struct ql_adapter *qdev)
 926{
 927        int status;
 928        /*
 929         * Get MPI firmware version for driver banner
 930         * and ethtool info.
 931         */
 932        status = ql_mb_about_fw(qdev);
 933        if (status)
 934                goto exit;
 935        status = ql_mb_get_fw_state(qdev);
 936        if (status)
 937                goto exit;
 938        /* Wake up a worker to get/set the TX/RX frame sizes. */
 939        queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
 940exit:
 941        return status;
 942}
 943
 944/* Take the MAC Core out of reset.
 945 * Enable statistics counting.
 946 * Take the transmitter/receiver out of reset.
 947 * This functionality may be done in the MPI firmware at a
 948 * later date.
 949 */
 950static int ql_8012_port_initialize(struct ql_adapter *qdev)
 951{
 952        int status = 0;
 953        u32 data;
 954
 955        if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
 956                /* Another function has the semaphore, so
 957                 * wait for the port init bit to come ready.
 958                 */
 959                netif_info(qdev, link, qdev->ndev,
 960                           "Another function has the semaphore, so wait for the port init bit to come ready.\n");
 961                status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
 962                if (status) {
 963                        netif_crit(qdev, link, qdev->ndev,
 964                                   "Port initialize timed out.\n");
 965                }
 966                return status;
 967        }
 968
 969        netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
 970        /* Set the core reset. */
 971        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
 972        if (status)
 973                goto end;
 974        data |= GLOBAL_CFG_RESET;
 975        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 976        if (status)
 977                goto end;
 978
 979        /* Clear the core reset and turn on jumbo for receiver. */
 980        data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
 981        data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
 982        data |= GLOBAL_CFG_TX_STAT_EN;
 983        data |= GLOBAL_CFG_RX_STAT_EN;
 984        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 985        if (status)
 986                goto end;
 987
 988        /* Enable the transmitter and clear its reset. */
 989        status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
 990        if (status)
 991                goto end;
 992        data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
 993        data |= TX_CFG_EN;      /* Enable the transmitter. */
 994        status = ql_write_xgmac_reg(qdev, TX_CFG, data);
 995        if (status)
 996                goto end;
 997
 998        /* Enable the receiver and clear its reset. */
 999        status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1000        if (status)
1001                goto end;
1002        data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1003        data |= RX_CFG_EN;      /* Enable the receiver. */
1004        status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1005        if (status)
1006                goto end;
1007
1008        /* Turn on jumbo. */
1009        status =
1010            ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1011        if (status)
1012                goto end;
1013        status =
1014            ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1015        if (status)
1016                goto end;
1017
1018        /* Signal to the world that the port is enabled.        */
1019        ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020end:
1021        ql_sem_unlock(qdev, qdev->xg_sem_mask);
1022        return status;
1023}
1024
1025static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026{
1027        return PAGE_SIZE << qdev->lbq_buf_order;
1028}
1029
1030/* Get the next large buffer. */
1031static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032{
1033        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034        rx_ring->lbq_curr_idx++;
1035        if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036                rx_ring->lbq_curr_idx = 0;
1037        rx_ring->lbq_free_cnt++;
1038        return lbq_desc;
1039}
1040
1041static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042                struct rx_ring *rx_ring)
1043{
1044        struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045
1046        pci_dma_sync_single_for_cpu(qdev->pdev,
1047                                        dma_unmap_addr(lbq_desc, mapaddr),
1048                                    rx_ring->lbq_buf_size,
1049                                        PCI_DMA_FROMDEVICE);
1050
1051        /* If it's the last chunk of our master page then
1052         * we unmap it.
1053         */
1054        if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055                                        == ql_lbq_block_size(qdev))
1056                pci_unmap_page(qdev->pdev,
1057                                lbq_desc->p.pg_chunk.map,
1058                                ql_lbq_block_size(qdev),
1059                                PCI_DMA_FROMDEVICE);
1060        return lbq_desc;
1061}
1062
1063/* Get the next small buffer. */
1064static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065{
1066        struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067        rx_ring->sbq_curr_idx++;
1068        if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069                rx_ring->sbq_curr_idx = 0;
1070        rx_ring->sbq_free_cnt++;
1071        return sbq_desc;
1072}
1073
1074/* Update an rx ring index. */
1075static void ql_update_cq(struct rx_ring *rx_ring)
1076{
1077        rx_ring->cnsmr_idx++;
1078        rx_ring->curr_entry++;
1079        if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080                rx_ring->cnsmr_idx = 0;
1081                rx_ring->curr_entry = rx_ring->cq_base;
1082        }
1083}
1084
1085static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086{
1087        ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1088}
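
/* Illustrative sketch (not part of the original driver): how a consumer
 * would presumably combine the two helpers above - advance the software
 * consumer index once per processed entry, then tell the chip where we
 * are with a single doorbell write at the end.  The budget loop and the
 * per-entry handler are assumptions for the example.
 */
static void ql_example_service_cq(struct rx_ring *rx_ring, int budget)
{
	while (budget--) {
		/* ... process the response at rx_ring->curr_entry here ... */
		ql_update_cq(rx_ring);		/* bump cnsmr_idx, wrap */
	}
	ql_write_cq_idx(rx_ring);		/* one doorbell update */
}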
1089
1090static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091                                                struct bq_desc *lbq_desc)
1092{
1093        if (!rx_ring->pg_chunk.page) {
1094                u64 map;
1095                rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1096                                                GFP_ATOMIC,
1097                                                qdev->lbq_buf_order);
1098                if (unlikely(!rx_ring->pg_chunk.page)) {
1099                        netif_err(qdev, drv, qdev->ndev,
1100                                  "page allocation failed.\n");
1101                        return -ENOMEM;
1102                }
1103                rx_ring->pg_chunk.offset = 0;
1104                map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1105                                        0, ql_lbq_block_size(qdev),
1106                                        PCI_DMA_FROMDEVICE);
1107                if (pci_dma_mapping_error(qdev->pdev, map)) {
1108                        __free_pages(rx_ring->pg_chunk.page,
1109                                        qdev->lbq_buf_order);
1110                        rx_ring->pg_chunk.page = NULL;
1111                        netif_err(qdev, drv, qdev->ndev,
1112                                  "PCI mapping failed.\n");
1113                        return -ENOMEM;
1114                }
1115                rx_ring->pg_chunk.map = map;
1116                rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1117        }
1118
1119        /* Copy the current master pg_chunk info
1120         * to the current descriptor.
1121         */
1122        lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1123
1124        /* Adjust the master page chunk for next
1125         * buffer get.
1126         */
1127        rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1128        if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1129                rx_ring->pg_chunk.page = NULL;
1130                lbq_desc->p.pg_chunk.last_flag = 1;
1131        } else {
1132                rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1133                get_page(rx_ring->pg_chunk.page);
1134                lbq_desc->p.pg_chunk.last_flag = 0;
1135        }
1136        return 0;
1137}
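
/* Worked example (illustrative; the concrete values are assumptions):
 * with a 4 KiB PAGE_SIZE, lbq_buf_order = 1 and lbq_buf_size = 2048, the
 * master page from alloc_pages() is 8 KiB and ql_get_next_chunk() hands
 * out four chunks at offsets 0, 2048, 4096 and 6144.  The chunk at 6144
 * gets last_flag set and drops the master page pointer, and
 * ql_get_curr_lchunk() unmaps the page when that chunk is consumed.
 */
static inline unsigned int ql_example_chunks_per_block(struct ql_adapter *qdev,
							struct rx_ring *rx_ring)
{
	return ql_lbq_block_size(qdev) / rx_ring->lbq_buf_size;
}
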
1138/* Process (refill) a large buffer queue. */
1139static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1140{
1141        u32 clean_idx = rx_ring->lbq_clean_idx;
1142        u32 start_idx = clean_idx;
1143        struct bq_desc *lbq_desc;
1144        u64 map;
1145        int i;
1146
1147        while (rx_ring->lbq_free_cnt > 32) {
1148                for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1149                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1150                                     "lbq: try cleaning clean_idx = %d.\n",
1151                                     clean_idx);
1152                        lbq_desc = &rx_ring->lbq[clean_idx];
1153                        if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1154                                rx_ring->lbq_clean_idx = clean_idx;
1155                                netif_err(qdev, ifup, qdev->ndev,
1156                                                "Could not get a page chunk, i=%d, clean_idx = %d.\n",
1157                                                i, clean_idx);
1158                                return;
1159                        }
1160
1161                        map = lbq_desc->p.pg_chunk.map +
1162                                lbq_desc->p.pg_chunk.offset;
1163                        dma_unmap_addr_set(lbq_desc, mapaddr, map);
1164                        dma_unmap_len_set(lbq_desc, maplen,
1165                                          rx_ring->lbq_buf_size);
1166                        *lbq_desc->addr = cpu_to_le64(map);
1167
1168                        pci_dma_sync_single_for_device(qdev->pdev, map,
1169                                                rx_ring->lbq_buf_size,
1170                                                PCI_DMA_FROMDEVICE);
1171                        clean_idx++;
1172                        if (clean_idx == rx_ring->lbq_len)
1173                                clean_idx = 0;
1174                }
1175
1176                rx_ring->lbq_clean_idx = clean_idx;
1177                rx_ring->lbq_prod_idx += 16;
1178                if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179                        rx_ring->lbq_prod_idx = 0;
1180                rx_ring->lbq_free_cnt -= 16;
1181        }
1182
1183        if (start_idx != clean_idx) {
1184                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1185                             "lbq: updating prod idx = %d.\n",
1186                             rx_ring->lbq_prod_idx);
1187                ql_write_db_reg(rx_ring->lbq_prod_idx,
1188                                rx_ring->lbq_prod_idx_db_reg);
1189        }
1190}
1191
1192/* Process (refill) a small buffer queue. */
1193static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194{
1195        u32 clean_idx = rx_ring->sbq_clean_idx;
1196        u32 start_idx = clean_idx;
1197        struct bq_desc *sbq_desc;
1198        u64 map;
1199        int i;
1200
1201        while (rx_ring->sbq_free_cnt > 16) {
1202                for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1203                        sbq_desc = &rx_ring->sbq[clean_idx];
1204                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1205                                     "sbq: try cleaning clean_idx = %d.\n",
1206                                     clean_idx);
1207                        if (sbq_desc->p.skb == NULL) {
1208                                netif_printk(qdev, rx_status, KERN_DEBUG,
1209                                             qdev->ndev,
1210                                             "sbq: getting new skb for index %d.\n",
1211                                             sbq_desc->index);
1212                                sbq_desc->p.skb =
1213                                    netdev_alloc_skb(qdev->ndev,
1214                                                     SMALL_BUFFER_SIZE);
1215                                if (sbq_desc->p.skb == NULL) {
1216                                        rx_ring->sbq_clean_idx = clean_idx;
1217                                        return;
1218                                }
1219                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220                                map = pci_map_single(qdev->pdev,
1221                                                     sbq_desc->p.skb->data,
1222                                                     rx_ring->sbq_buf_size,
1223                                                     PCI_DMA_FROMDEVICE);
1224                                if (pci_dma_mapping_error(qdev->pdev, map)) {
1225                                        netif_err(qdev, ifup, qdev->ndev,
1226                                                  "PCI mapping failed.\n");
1227                                        rx_ring->sbq_clean_idx = clean_idx;
1228                                        dev_kfree_skb_any(sbq_desc->p.skb);
1229                                        sbq_desc->p.skb = NULL;
1230                                        return;
1231                                }
1232                                dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233                                dma_unmap_len_set(sbq_desc, maplen,
1234                                                  rx_ring->sbq_buf_size);
1235                                *sbq_desc->addr = cpu_to_le64(map);
1236                        }
1237
1238                        clean_idx++;
1239                        if (clean_idx == rx_ring->sbq_len)
1240                                clean_idx = 0;
1241                }
1242                rx_ring->sbq_clean_idx = clean_idx;
1243                rx_ring->sbq_prod_idx += 16;
1244                if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245                        rx_ring->sbq_prod_idx = 0;
1246                rx_ring->sbq_free_cnt -= 16;
1247        }
1248
1249        if (start_idx != clean_idx) {
1250                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251                             "sbq: updating prod idx = %d.\n",
1252                             rx_ring->sbq_prod_idx);
1253                ql_write_db_reg(rx_ring->sbq_prod_idx,
1254                                rx_ring->sbq_prod_idx_db_reg);
1255        }
1256}
1257
1258static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259                                    struct rx_ring *rx_ring)
1260{
1261        ql_update_sbq(qdev, rx_ring);
1262        ql_update_lbq(qdev, rx_ring);
1263}
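
/* Illustrative note (not from the original source): both refill paths
 * above work in batches - they only run while more than 32 (large) or
 * 16 (small) buffers have been consumed, advance the producer index 16
 * entries at a time with wrap-around, and ring the doorbell once per
 * call rather than per buffer.  The wrap arithmetic, which assumes the
 * queue lengths are multiples of 16 as the code above does, is simply:
 */
static inline u32 ql_example_advance_prod(u32 prod_idx, u32 ring_len)
{
	prod_idx += 16;			/* one refill batch */
	if (prod_idx == ring_len)	/* wrap at end of ring */
		prod_idx = 0;
	return prod_idx;
}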
1264
1265/* Unmaps tx buffers.  Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1267 */
1268static void ql_unmap_send(struct ql_adapter *qdev,
1269                          struct tx_ring_desc *tx_ring_desc, int mapped)
1270{
1271        int i;
1272        for (i = 0; i < mapped; i++) {
1273                if (i == 0 || (i == 7 && mapped > 7)) {
1274                        /*
1275                         * Unmap the skb->data area, or the
1276                         * external sglist (AKA the Outbound
1277                         * Address List (OAL)).
1278                         * If it's the zeroeth element, then it's
1279                         * the skb->data area.  If it's the 7th
1280                         * element and there are more than 6 frags,
1281                         * then it's an OAL.
1282                         */
1283                        if (i == 7) {
1284                                netif_printk(qdev, tx_done, KERN_DEBUG,
1285                                             qdev->ndev,
1286                                             "unmapping OAL area.\n");
1287                        }
1288                        pci_unmap_single(qdev->pdev,
1289                                         dma_unmap_addr(&tx_ring_desc->map[i],
1290                                                        mapaddr),
1291                                         dma_unmap_len(&tx_ring_desc->map[i],
1292                                                       maplen),
1293                                         PCI_DMA_TODEVICE);
1294                } else {
1295                        netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296                                     "unmapping frag %d.\n", i);
1297                        pci_unmap_page(qdev->pdev,
1298                                       dma_unmap_addr(&tx_ring_desc->map[i],
1299                                                      mapaddr),
1300                                       dma_unmap_len(&tx_ring_desc->map[i],
1301                                                     maplen), PCI_DMA_TODEVICE);
1302                }
1303        }
1304
1305}
1306
1307/* Map the buffers for this transmit.  This will return
1308 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309 */
1310static int ql_map_send(struct ql_adapter *qdev,
1311                       struct ob_mac_iocb_req *mac_iocb_ptr,
1312                       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313{
1314        int len = skb_headlen(skb);
1315        dma_addr_t map;
1316        int frag_idx, err, map_idx = 0;
1317        struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318        int frag_cnt = skb_shinfo(skb)->nr_frags;
1319
1320        if (frag_cnt) {
1321                netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322                             "frag_cnt = %d.\n", frag_cnt);
1323        }
1324        /*
1325         * Map the skb buffer first.
1326         */
1327        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328
1329        err = pci_dma_mapping_error(qdev->pdev, map);
1330        if (err) {
1331                netif_err(qdev, tx_queued, qdev->ndev,
1332                          "PCI mapping failed with error: %d\n", err);
1333
1334                return NETDEV_TX_BUSY;
1335        }
1336
1337        tbd->len = cpu_to_le32(len);
1338        tbd->addr = cpu_to_le64(map);
1339        dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340        dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341        map_idx++;
1342
1343        /*
1344         * This loop fills the remainder of the 8 address descriptors
1345         * in the IOCB.  If there are more than 7 fragments, then the
1346         * eighth address desc will point to an external list (OAL).
1347         * When this happens, the remainder of the frags will be stored
1348         * in this list.
1349         */
1350        for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351                skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352                tbd++;
1353                if (frag_idx == 6 && frag_cnt > 7) {
1354                        /* Let's tack on an sglist.
1355                         * Our control block will now
1356                         * look like this:
1357                         * iocb->seg[0] = skb->data
1358                         * iocb->seg[1] = frag[0]
1359                         * iocb->seg[2] = frag[1]
1360                         * iocb->seg[3] = frag[2]
1361                         * iocb->seg[4] = frag[3]
1362                         * iocb->seg[5] = frag[4]
1363                         * iocb->seg[6] = frag[5]
1364                         * iocb->seg[7] = ptr to OAL (external sglist)
1365                         * oal->seg[0] = frag[6]
1366                         * oal->seg[1] = frag[7]
1367                         * oal->seg[2] = frag[8]
1368                         * oal->seg[3] = frag[9]
1369                         * oal->seg[4] = frag[10]
1370                         *      etc...
1371                         */
1372                        /* Tack on the OAL in the eighth segment of IOCB. */
1373                        map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374                                             sizeof(struct oal),
1375                                             PCI_DMA_TODEVICE);
1376                        err = pci_dma_mapping_error(qdev->pdev, map);
1377                        if (err) {
1378                                netif_err(qdev, tx_queued, qdev->ndev,
1379                                          "PCI mapping outbound address list with error: %d\n",
1380                                          err);
1381                                goto map_error;
1382                        }
1383
1384                        tbd->addr = cpu_to_le64(map);
1385                        /*
1386                         * The length is the number of fragments
1387                         * that remain to be mapped times the length
1388                         * of our sglist (OAL).
1389                         */
1390                        tbd->len =
1391                            cpu_to_le32((sizeof(struct tx_buf_desc) *
1392                                         (frag_cnt - frag_idx)) | TX_DESC_C);
1393                        dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394                                           map);
1395                        dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396                                          sizeof(struct oal));
1397                        tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398                        map_idx++;
1399                }
1400
1401                map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402                                       DMA_TO_DEVICE);
1403
1404                err = dma_mapping_error(&qdev->pdev->dev, map);
1405                if (err) {
1406                        netif_err(qdev, tx_queued, qdev->ndev,
1407                                  "PCI mapping frags failed with error: %d.\n",
1408                                  err);
1409                        goto map_error;
1410                }
1411
1412                tbd->addr = cpu_to_le64(map);
1413                tbd->len = cpu_to_le32(skb_frag_size(frag));
1414                dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415                dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416                                  skb_frag_size(frag));
1417
1418        }
1419        /* Save the number of segments we've mapped. */
1420        tx_ring_desc->map_cnt = map_idx;
1421        /* Terminate the last segment. */
1422        tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423        return NETDEV_TX_OK;
1424
1425map_error:
1426        /*
1427         * If the first frag mapping failed, then map_idx will be one,
1428         * which causes only the skb->data area to be unmapped.  Otherwise
1429         * we pass in the number of entries that mapped successfully
1430         * so they can be unmapped.
1431         */
1432        ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433        return NETDEV_TX_BUSY;
1434}
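
/*
 * Illustrative sketch (editor's addition, not driver code): with the
 * descriptor layout described in ql_map_send() above, one send consumes one
 * unmap entry for skb->data, one per fragment, and one extra entry for the
 * OAL mapping whenever more than seven fragments force the external list.
 * The helper name below is hypothetical.
 */
static inline int ql_example_send_map_entries(int frag_cnt)
{
        return 1 + frag_cnt + (frag_cnt > 7 ? 1 : 0);
}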
1435
1436/* Categorizing receive firmware frame errors */
1437static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438                                 struct rx_ring *rx_ring)
1439{
1440        struct nic_stats *stats = &qdev->nic_stats;
1441
1442        stats->rx_err_count++;
1443        rx_ring->rx_errors++;
1444
1445        switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446        case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447                stats->rx_code_err++;
1448                break;
1449        case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450                stats->rx_oversize_err++;
1451                break;
1452        case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453                stats->rx_undersize_err++;
1454                break;
1455        case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456                stats->rx_preamble_err++;
1457                break;
1458        case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459                stats->rx_frame_len_err++;
1460                break;
1461        case IB_MAC_IOCB_RSP_ERR_CRC:
1462                stats->rx_crc_err++;    /* fall through */
1463        default:
1464                break;
1465        }
1466}
1467
1468/**
1469 * ql_update_mac_hdr_len - helper routine to update the mac header length
1470 * based on vlan tags if present
1471 */
1472static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1473                                  struct ib_mac_iocb_rsp *ib_mac_rsp,
1474                                  void *page, size_t *len)
1475{
1476        u16 *tags;
1477
1478        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1479                return;
1480        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1481                tags = (u16 *)page;
1482                /* Look for stacked vlan tags in ethertype field */
1483                if (tags[6] == ETH_P_8021Q &&
1484                    tags[8] == ETH_P_8021Q)
1485                        *len += 2 * VLAN_HLEN;
1486                else
1487                        *len += VLAN_HLEN;
1488        }
1489}
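
/*
 * Illustrative sketch (editor's addition, not driver code): the routine
 * above grows the reported MAC header length by VLAN_HLEN per 802.1Q tag it
 * finds, so the result is simply ETH_HLEN plus four bytes per tag.  The
 * helper name is hypothetical.
 */
static inline size_t ql_example_mac_hdr_len(unsigned int vlan_tag_cnt)
{
        return ETH_HLEN + vlan_tag_cnt * VLAN_HLEN;
}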
1490
1491/* Process an inbound completion from an rx ring. */
1492static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1493                                        struct rx_ring *rx_ring,
1494                                        struct ib_mac_iocb_rsp *ib_mac_rsp,
1495                                        u32 length,
1496                                        u16 vlan_id)
1497{
1498        struct sk_buff *skb;
1499        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1500        struct napi_struct *napi = &rx_ring->napi;
1501
1502        /* Frame error, so drop the packet. */
1503        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1504                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1505                put_page(lbq_desc->p.pg_chunk.page);
1506                return;
1507        }
1508        napi->dev = qdev->ndev;
1509
1510        skb = napi_get_frags(napi);
1511        if (!skb) {
1512                netif_err(qdev, drv, qdev->ndev,
1513                          "Couldn't get an skb, exiting.\n");
1514                rx_ring->rx_dropped++;
1515                put_page(lbq_desc->p.pg_chunk.page);
1516                return;
1517        }
1518        prefetch(lbq_desc->p.pg_chunk.va);
1519        __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1520                             lbq_desc->p.pg_chunk.page,
1521                             lbq_desc->p.pg_chunk.offset,
1522                             length);
1523
1524        skb->len += length;
1525        skb->data_len += length;
1526        skb->truesize += length;
1527        skb_shinfo(skb)->nr_frags++;
1528
1529        rx_ring->rx_packets++;
1530        rx_ring->rx_bytes += length;
1531        skb->ip_summed = CHECKSUM_UNNECESSARY;
1532        skb_record_rx_queue(skb, rx_ring->cq_id);
1533        if (vlan_id != 0xffff)
1534                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1535        napi_gro_frags(napi);
1536}
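
/*
 * Illustrative sketch (editor's addition, not driver code): the generic
 * napi_gro_frags() receive pattern used above, reduced to its essentials.
 * The function name and parameters are hypothetical; the real path also
 * handles VLAN tags, drop accounting and prefetching.
 */
static inline void ql_example_gro_rx_page(struct napi_struct *napi,
                                          struct page *page,
                                          unsigned int offset,
                                          unsigned int len)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (!skb) {
                put_page(page);
                return;
        }
        /* Attach the page chunk as a fragment and fix up the skb size. */
        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, len);
        skb->len += len;
        skb->data_len += len;
        skb->truesize += len;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        napi_gro_frags(napi);
}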
1537
1538/* Process an inbound completion from an rx ring. */
1539static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540                                        struct rx_ring *rx_ring,
1541                                        struct ib_mac_iocb_rsp *ib_mac_rsp,
1542                                        u32 length,
1543                                        u16 vlan_id)
1544{
1545        struct net_device *ndev = qdev->ndev;
1546        struct sk_buff *skb = NULL;
1547        void *addr;
1548        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1549        struct napi_struct *napi = &rx_ring->napi;
1550        size_t hlen = ETH_HLEN;
1551
1552        skb = netdev_alloc_skb(ndev, length);
1553        if (!skb) {
1554                rx_ring->rx_dropped++;
1555                put_page(lbq_desc->p.pg_chunk.page);
1556                return;
1557        }
1558
1559        addr = lbq_desc->p.pg_chunk.va;
1560        prefetch(addr);
1561
1562        /* Frame error, so drop the packet. */
1563        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1564                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1565                goto err_out;
1566        }
1567
1568        /* Update the MAC header length */
1569        ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1570
1571        /* The max framesize filter on this chip is set higher than
1572         * MTU since FCoE uses 2k frames.
1573         */
1574        if (length > ndev->mtu + hlen) {
1575                netif_err(qdev, drv, qdev->ndev,
1576                          "Frame too long, dropping.\n");
1577                rx_ring->rx_dropped++;
1578                goto err_out;
1579        }
1580        skb_put_data(skb, addr, hlen);
1581        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1582                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1583                     length);
1584        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1585                                lbq_desc->p.pg_chunk.offset + hlen,
1586                                length - hlen);
1587        skb->len += length - hlen;
1588        skb->data_len += length - hlen;
1589        skb->truesize += length - hlen;
1590
1591        rx_ring->rx_packets++;
1592        rx_ring->rx_bytes += skb->len;
1593        skb->protocol = eth_type_trans(skb, ndev);
1594        skb_checksum_none_assert(skb);
1595
1596        if ((ndev->features & NETIF_F_RXCSUM) &&
1597                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1598                /* TCP frame. */
1599                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1600                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1601                                     "TCP checksum done!\n");
1602                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1603                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1604                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1605                        /* Unfragmented ipv4 UDP frame. */
1606                        struct iphdr *iph =
1607                                (struct iphdr *)((u8 *)addr + hlen);
1608                        if (!(iph->frag_off &
1609                                htons(IP_MF|IP_OFFSET))) {
1610                                skb->ip_summed = CHECKSUM_UNNECESSARY;
1611                                netif_printk(qdev, rx_status, KERN_DEBUG,
1612                                             qdev->ndev,
1613                                             "UDP checksum done!\n");
1614                        }
1615                }
1616        }
1617
1618        skb_record_rx_queue(skb, rx_ring->cq_id);
1619        if (vlan_id != 0xffff)
1620                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1621        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1622                napi_gro_receive(napi, skb);
1623        else
1624                netif_receive_skb(skb);
1625        return;
1626err_out:
1627        dev_kfree_skb_any(skb);
1628        put_page(lbq_desc->p.pg_chunk.page);
1629}
1630
1631/* Process an inbound completion from an rx ring. */
1632static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1633                                        struct rx_ring *rx_ring,
1634                                        struct ib_mac_iocb_rsp *ib_mac_rsp,
1635                                        u32 length,
1636                                        u16 vlan_id)
1637{
1638        struct net_device *ndev = qdev->ndev;
1639        struct sk_buff *skb = NULL;
1640        struct sk_buff *new_skb = NULL;
1641        struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1642
1643        skb = sbq_desc->p.skb;
1644        /* Allocate new_skb and copy */
1645        new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1646        if (new_skb == NULL) {
1647                rx_ring->rx_dropped++;
1648                return;
1649        }
1650        skb_reserve(new_skb, NET_IP_ALIGN);
1651
1652        pci_dma_sync_single_for_cpu(qdev->pdev,
1653                                    dma_unmap_addr(sbq_desc, mapaddr),
1654                                    dma_unmap_len(sbq_desc, maplen),
1655                                    PCI_DMA_FROMDEVICE);
1656
1657        skb_put_data(new_skb, skb->data, length);
1658
1659        pci_dma_sync_single_for_device(qdev->pdev,
1660                                       dma_unmap_addr(sbq_desc, mapaddr),
1661                                       dma_unmap_len(sbq_desc, maplen),
1662                                       PCI_DMA_FROMDEVICE);
1663        skb = new_skb;
1664
1665        /* Frame error, so drop the packet. */
1666        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1667                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1668                dev_kfree_skb_any(skb);
1669                return;
1670        }
1671
1672        /* loopback self test for ethtool */
1673        if (test_bit(QL_SELFTEST, &qdev->flags)) {
1674                ql_check_lb_frame(qdev, skb);
1675                dev_kfree_skb_any(skb);
1676                return;
1677        }
1678
1679        /* The max framesize filter on this chip is set higher than
1680         * MTU since FCoE uses 2k frames.
1681         */
1682        if (skb->len > ndev->mtu + ETH_HLEN) {
1683                dev_kfree_skb_any(skb);
1684                rx_ring->rx_dropped++;
1685                return;
1686        }
1687
1688        prefetch(skb->data);
1689        if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1690                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1691                             "%s Multicast.\n",
1692                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1693                             IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1694                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1695                             IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1696                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1697                             IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1698        }
1699        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1700                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1701                             "Promiscuous Packet.\n");
1702
1703        rx_ring->rx_packets++;
1704        rx_ring->rx_bytes += skb->len;
1705        skb->protocol = eth_type_trans(skb, ndev);
1706        skb_checksum_none_assert(skb);
1707
1708        /* If rx checksum is on, and there are no
1709         * csum or frame errors.
1710         */
1711        if ((ndev->features & NETIF_F_RXCSUM) &&
1712                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1713                /* TCP frame. */
1714                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1715                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1716                                     "TCP checksum done!\n");
1717                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1718                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1719                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1720                        /* Unfragmented ipv4 UDP frame. */
1721                        struct iphdr *iph = (struct iphdr *) skb->data;
1722                        if (!(iph->frag_off &
1723                                htons(IP_MF|IP_OFFSET))) {
1724                                skb->ip_summed = CHECKSUM_UNNECESSARY;
1725                                netif_printk(qdev, rx_status, KERN_DEBUG,
1726                                             qdev->ndev,
1727                                             "UDP checksum done!\n");
1728                        }
1729                }
1730        }
1731
1732        skb_record_rx_queue(skb, rx_ring->cq_id);
1733        if (vlan_id != 0xffff)
1734                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1735        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1736                napi_gro_receive(&rx_ring->napi, skb);
1737        else
1738                netif_receive_skb(skb);
1739}
1740
1741static void ql_realign_skb(struct sk_buff *skb, int len)
1742{
1743        void *temp_addr = skb->data;
1744
1745        /* Undo the skb_reserve(skb,32) we did before
1746         * giving to hardware, and realign data on
1747         * a 2-byte boundary.
1748         */
1749        skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1750        skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1751        skb_copy_to_linear_data(skb, temp_addr,
1752                (unsigned int)len);
1753}
1754
1755/*
1756 * This function builds an skb for the given inbound
1757 * completion.  It will be rewritten for readability in the near
1758 * future, but for now it works well.
1759 */
1760static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1761                                       struct rx_ring *rx_ring,
1762                                       struct ib_mac_iocb_rsp *ib_mac_rsp)
1763{
1764        struct bq_desc *lbq_desc;
1765        struct bq_desc *sbq_desc;
1766        struct sk_buff *skb = NULL;
1767        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1768        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1769        size_t hlen = ETH_HLEN;
1770
1771        /*
1772         * Handle the header buffer if present.
1773         */
1774        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1775            ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1776                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1777                             "Header of %d bytes in small buffer.\n", hdr_len);
1778                /*
1779                 * Headers fit nicely into a small buffer.
1780                 */
1781                sbq_desc = ql_get_curr_sbuf(rx_ring);
1782                pci_unmap_single(qdev->pdev,
1783                                dma_unmap_addr(sbq_desc, mapaddr),
1784                                dma_unmap_len(sbq_desc, maplen),
1785                                PCI_DMA_FROMDEVICE);
1786                skb = sbq_desc->p.skb;
1787                ql_realign_skb(skb, hdr_len);
1788                skb_put(skb, hdr_len);
1789                sbq_desc->p.skb = NULL;
1790        }
1791
1792        /*
1793         * Handle the data buffer(s).
1794         */
1795        if (unlikely(!length)) {        /* Is there data too? */
1796                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797                             "No Data buffer in this packet.\n");
1798                return skb;
1799        }
1800
1801        if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1802                if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1803                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1804                                     "Headers in small, data of %d bytes in small, combine them.\n",
1805                                     length);
1806                        /*
1807                         * Data is less than small buffer size so it's
1808                         * stuffed in a small buffer.
1809                         * For this case we append the data
1810                         * from the "data" small buffer to the "header" small
1811                         * buffer.
1812                         */
1813                        sbq_desc = ql_get_curr_sbuf(rx_ring);
1814                        pci_dma_sync_single_for_cpu(qdev->pdev,
1815                                                    dma_unmap_addr
1816                                                    (sbq_desc, mapaddr),
1817                                                    dma_unmap_len
1818                                                    (sbq_desc, maplen),
1819                                                    PCI_DMA_FROMDEVICE);
1820                        skb_put_data(skb, sbq_desc->p.skb->data, length);
1821                        pci_dma_sync_single_for_device(qdev->pdev,
1822                                                       dma_unmap_addr
1823                                                       (sbq_desc,
1824                                                        mapaddr),
1825                                                       dma_unmap_len
1826                                                       (sbq_desc,
1827                                                        maplen),
1828                                                       PCI_DMA_FROMDEVICE);
1829                } else {
1830                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1831                                     "%d bytes in a single small buffer.\n",
1832                                     length);
1833                        sbq_desc = ql_get_curr_sbuf(rx_ring);
1834                        skb = sbq_desc->p.skb;
1835                        ql_realign_skb(skb, length);
1836                        skb_put(skb, length);
1837                        pci_unmap_single(qdev->pdev,
1838                                         dma_unmap_addr(sbq_desc,
1839                                                        mapaddr),
1840                                         dma_unmap_len(sbq_desc,
1841                                                       maplen),
1842                                         PCI_DMA_FROMDEVICE);
1843                        sbq_desc->p.skb = NULL;
1844                }
1845        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1846                if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1847                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1848                                     "Header in small, %d bytes in large. Chain large to small!\n",
1849                                     length);
1850                        /*
1851                         * The data is in a single large buffer.  We
1852                         * chain it to the header buffer's skb and let
1853                         * it rip.
1854                         */
1855                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1856                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1857                                     "Chaining page at offset = %d, for %d bytes  to skb.\n",
1858                                     lbq_desc->p.pg_chunk.offset, length);
1859                        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1860                                                lbq_desc->p.pg_chunk.offset,
1861                                                length);
1862                        skb->len += length;
1863                        skb->data_len += length;
1864                        skb->truesize += length;
1865                } else {
1866                        /*
1867                         * The headers and data are in a single large buffer.  We
1868                         * chain the page to a new skb and pull the headers into its
1869                         * linear area.  This can happen with jumbo mtu on a non-TCP/UDP frame.
1870                         */
1871                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1872                        skb = netdev_alloc_skb(qdev->ndev, length);
1873                        if (skb == NULL) {
1874                                netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1875                                             "No skb available, drop the packet.\n");
1876                                return NULL;
1877                        }
1878                        pci_unmap_page(qdev->pdev,
1879                                       dma_unmap_addr(lbq_desc,
1880                                                      mapaddr),
1881                                       dma_unmap_len(lbq_desc, maplen),
1882                                       PCI_DMA_FROMDEVICE);
1883                        skb_reserve(skb, NET_IP_ALIGN);
1884                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1885                                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1886                                     length);
1887                        skb_fill_page_desc(skb, 0,
1888                                                lbq_desc->p.pg_chunk.page,
1889                                                lbq_desc->p.pg_chunk.offset,
1890                                                length);
1891                        skb->len += length;
1892                        skb->data_len += length;
1893                        skb->truesize += length;
1894                        ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1895                                              lbq_desc->p.pg_chunk.va,
1896                                              &hlen);
1897                        __pskb_pull_tail(skb, hlen);
1898                }
1899        } else {
1900                /*
1901                 * The data is in a chain of large buffers
1902                 * pointed to by a small buffer.  We loop
1903                 * through and chain them to our small header
1904                 * buffer's skb.
1905                 * frags:  There are 18 max frags and our small
1906                 *         buffer will hold 32 of them.  In practice
1907                 *         we use at most 3 for our 9000 byte jumbo
1908                 *         frames.  If the MTU goes up we could
1909                 *         eventually be in trouble.
1910                 */
1911                int size, i = 0;
1912                sbq_desc = ql_get_curr_sbuf(rx_ring);
1913                pci_unmap_single(qdev->pdev,
1914                                 dma_unmap_addr(sbq_desc, mapaddr),
1915                                 dma_unmap_len(sbq_desc, maplen),
1916                                 PCI_DMA_FROMDEVICE);
1917                if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1918                        /*
1919                         * This is a non-TCP/UDP IP frame, so
1920                         * the headers aren't split into a small
1921                         * buffer.  We have to use the small buffer
1922                         * that contains our sg list as our skb to
1923                         * send upstream. Copy the sg list here to
1924                         * a local buffer and use it to find the
1925                         * pages to chain.
1926                         */
1927                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1928                                     "%d bytes of headers & data in chain of large.\n",
1929                                     length);
1930                        skb = sbq_desc->p.skb;
1931                        sbq_desc->p.skb = NULL;
1932                        skb_reserve(skb, NET_IP_ALIGN);
1933                }
1934                do {
1935                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1936                        size = (length < rx_ring->lbq_buf_size) ? length :
1937                                rx_ring->lbq_buf_size;
1938
1939                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1940                                     "Adding page %d to skb for %d bytes.\n",
1941                                     i, size);
1942                        skb_fill_page_desc(skb, i,
1943                                                lbq_desc->p.pg_chunk.page,
1944                                                lbq_desc->p.pg_chunk.offset,
1945                                                size);
1946                        skb->len += size;
1947                        skb->data_len += size;
1948                        skb->truesize += size;
1949                        length -= size;
1950                        i++;
1951                } while (length > 0);
1952                ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1953                                      &hlen);
1954                __pskb_pull_tail(skb, hlen);
1955        }
1956        return skb;
1957}
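
/*
 * Illustrative sketch (editor's addition, not driver code): in the chained
 * large-buffer case above, the number of page chunks consumed is the data
 * length divided by the large-buffer chunk size, rounded up; for example, a
 * 9000 byte jumbo frame with 4K chunks needs the "3 max" mentioned in the
 * comment.  The helper name is hypothetical.
 */
static inline unsigned int ql_example_lbq_chunks(u32 length, u32 lbq_buf_size)
{
        return DIV_ROUND_UP(length, lbq_buf_size);
}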
1958
1959/* Process an inbound completion from an rx ring. */
1960static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1961                                   struct rx_ring *rx_ring,
1962                                   struct ib_mac_iocb_rsp *ib_mac_rsp,
1963                                   u16 vlan_id)
1964{
1965        struct net_device *ndev = qdev->ndev;
1966        struct sk_buff *skb = NULL;
1967
1968        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1969
1970        skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1971        if (unlikely(!skb)) {
1972                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1973                             "No skb available, drop packet.\n");
1974                rx_ring->rx_dropped++;
1975                return;
1976        }
1977
1978        /* Frame error, so drop the packet. */
1979        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1980                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1981                dev_kfree_skb_any(skb);
1982                return;
1983        }
1984
1985        /* The max framesize filter on this chip is set higher than
1986         * MTU since FCoE uses 2k frames.
1987         */
1988        if (skb->len > ndev->mtu + ETH_HLEN) {
1989                dev_kfree_skb_any(skb);
1990                rx_ring->rx_dropped++;
1991                return;
1992        }
1993
1994        /* loopback self test for ethtool */
1995        if (test_bit(QL_SELFTEST, &qdev->flags)) {
1996                ql_check_lb_frame(qdev, skb);
1997                dev_kfree_skb_any(skb);
1998                return;
1999        }
2000
2001        prefetch(skb->data);
2002        if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
2003                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
2004                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2005                             IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
2006                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2007                             IB_MAC_IOCB_RSP_M_REG ? "Registered" :
2008                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2009                             IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2010                rx_ring->rx_multicast++;
2011        }
2012        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2013                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2014                             "Promiscuous Packet.\n");
2015        }
2016
2017        skb->protocol = eth_type_trans(skb, ndev);
2018        skb_checksum_none_assert(skb);
2019
2020        /* If rx checksum is on, and there are no
2021         * csum or frame errors.
2022         */
2023        if ((ndev->features & NETIF_F_RXCSUM) &&
2024                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2025                /* TCP frame. */
2026                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2027                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2028                                     "TCP checksum done!\n");
2029                        skb->ip_summed = CHECKSUM_UNNECESSARY;
2030                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2031                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2032                        /* Unfragmented ipv4 UDP frame. */
2033                        struct iphdr *iph = (struct iphdr *) skb->data;
2034                        if (!(iph->frag_off &
2035                                htons(IP_MF|IP_OFFSET))) {
2036                                skb->ip_summed = CHECKSUM_UNNECESSARY;
2037                                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2038                                             "UDP checksum done!\n");
2039                        }
2040                }
2041        }
2042
2043        rx_ring->rx_packets++;
2044        rx_ring->rx_bytes += skb->len;
2045        skb_record_rx_queue(skb, rx_ring->cq_id);
2046        if (vlan_id != 0xffff)
2047                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2048        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2049                napi_gro_receive(&rx_ring->napi, skb);
2050        else
2051                netif_receive_skb(skb);
2052}
2053
2054/* Process an inbound completion from an rx ring. */
2055static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2056                                        struct rx_ring *rx_ring,
2057                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
2058{
2059        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2060        u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2061                        (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2062                        ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2063                        IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2064
2065        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2066
2067        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2068                /* The data and headers are split into
2069                 * separate buffers.
2070                 */
2071                ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2072                                                vlan_id);
2073        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2074                /* The data fit in a single small buffer.
2075                 * Allocate a new skb, copy the data and
2076                 * return the buffer to the free pool.
2077                 */
2078                ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2079                                                length, vlan_id);
2080        } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2081                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2082                (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2083                /* TCP packet in a page chunk that's been checksummed.
2084                 * Tack it on to our GRO skb and let it go.
2085                 */
2086                ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2087                                                length, vlan_id);
2088        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2089                /* Non-TCP packet in a page chunk. Allocate an
2090                 * skb, tack it on frags, and send it up.
2091                 */
2092                ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2093                                                length, vlan_id);
2094        } else {
2095                /* Non-TCP/UDP large frames that span multiple buffers
2096                 * can be processed correctly by the split frame logic.
2097                 */
2098                ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2099                                                vlan_id);
2100        }
2101
2102        return (unsigned long)length;
2103}
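
/*
 * Illustrative sketch (editor's addition, not driver code): how the routine
 * above derives the VLAN id it hands to the completion handlers; 0xffff is
 * the sentinel meaning "no tag to report to the stack".  The helper name is
 * hypothetical.
 */
static inline u16 ql_example_rx_vlan_id(struct ib_mac_iocb_rsp *ib_mac_rsp,
                                        netdev_features_t features)
{
        if (!(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ||
            !(features & NETIF_F_HW_VLAN_CTAG_RX))
                return 0xffff;
        return le16_to_cpu(ib_mac_rsp->vlan_id) & IB_MAC_IOCB_RSP_VLAN_MASK;
}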
2104
2105/* Process an outbound completion from an rx ring. */
2106static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2107                                   struct ob_mac_iocb_rsp *mac_rsp)
2108{
2109        struct tx_ring *tx_ring;
2110        struct tx_ring_desc *tx_ring_desc;
2111
2112        QL_DUMP_OB_MAC_RSP(mac_rsp);
2113        tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2114        tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2115        ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2116        tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2117        tx_ring->tx_packets++;
2118        dev_kfree_skb(tx_ring_desc->skb);
2119        tx_ring_desc->skb = NULL;
2120
2121        if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2122                                        OB_MAC_IOCB_RSP_S |
2123                                        OB_MAC_IOCB_RSP_L |
2124                                        OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2125                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2126                        netif_warn(qdev, tx_done, qdev->ndev,
2127                                   "Total descriptor length did not match transfer length.\n");
2128                }
2129                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2130                        netif_warn(qdev, tx_done, qdev->ndev,
2131                                   "Frame too short to be valid, not sent.\n");
2132                }
2133                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2134                        netif_warn(qdev, tx_done, qdev->ndev,
2135                                   "Frame too long, but sent anyway.\n");
2136                }
2137                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2138                        netif_warn(qdev, tx_done, qdev->ndev,
2139                                   "PCI backplane error. Frame not sent.\n");
2140                }
2141        }
2142        atomic_inc(&tx_ring->tx_count);
2143}
2144
2145/* Fire up a handler to reset the MPI processor. */
2146void ql_queue_fw_error(struct ql_adapter *qdev)
2147{
2148        ql_link_off(qdev);
2149        queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2150}
2151
2152void ql_queue_asic_error(struct ql_adapter *qdev)
2153{
2154        ql_link_off(qdev);
2155        ql_disable_interrupts(qdev);
2156        /* Clear adapter up bit to signal the recovery
2157         * process that it shouldn't kill the reset worker
2158         * thread
2159         */
2160        clear_bit(QL_ADAPTER_UP, &qdev->flags);
2161        /* Set the asic recovery bit to indicate to the reset process
2162         * that we are in fatal error recovery rather than a normal close
2163         */
2164        set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2165        queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2166}
2167
2168static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2169                                    struct ib_ae_iocb_rsp *ib_ae_rsp)
2170{
2171        switch (ib_ae_rsp->event) {
2172        case MGMT_ERR_EVENT:
2173                netif_err(qdev, rx_err, qdev->ndev,
2174                          "Management Processor Fatal Error.\n");
2175                ql_queue_fw_error(qdev);
2176                return;
2177
2178        case CAM_LOOKUP_ERR_EVENT:
2179                netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2180                netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2181                ql_queue_asic_error(qdev);
2182                return;
2183
2184        case SOFT_ECC_ERROR_EVENT:
2185                netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2186                ql_queue_asic_error(qdev);
2187                break;
2188
2189        case PCI_ERR_ANON_BUF_RD:
2190                netdev_err(qdev->ndev, "PCI error occurred when reading "
2191                                        "anonymous buffers from rx_ring %d.\n",
2192                                        ib_ae_rsp->q_id);
2193                ql_queue_asic_error(qdev);
2194                break;
2195
2196        default:
2197                netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2198                          ib_ae_rsp->event);
2199                ql_queue_asic_error(qdev);
2200                break;
2201        }
2202}
2203
2204static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2205{
2206        struct ql_adapter *qdev = rx_ring->qdev;
2207        u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2208        struct ob_mac_iocb_rsp *net_rsp = NULL;
2209        int count = 0;
2210
2211        struct tx_ring *tx_ring;
2212        /* While there are entries in the completion queue. */
2213        while (prod != rx_ring->cnsmr_idx) {
2214
2215                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2216                             "cq_id = %d, prod = %d, cnsmr = %d.\n",
2217                             rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2218
2219                net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2220                rmb();
2221                switch (net_rsp->opcode) {
2222
2223                case OPCODE_OB_MAC_TSO_IOCB:
2224                case OPCODE_OB_MAC_IOCB:
2225                        ql_process_mac_tx_intr(qdev, net_rsp);
2226                        break;
2227                default:
2228                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2229                                     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2230                                     net_rsp->opcode);
2231                }
2232                count++;
2233                ql_update_cq(rx_ring);
2234                prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2235        }
2236        if (!net_rsp)
2237                return 0;
2238        ql_write_cq_idx(rx_ring);
2239        tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2240        if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2241                if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2242                        /*
2243                         * The queue got stopped because the tx_ring was full.
2244                         * Wake it up, because it's now at least 25% empty.
2245                         */
2246                        netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2247        }
2248
2249        return count;
2250}
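
/*
 * Illustrative sketch (editor's addition, not driver code): the wake-up test
 * above in boolean form.  The stopped subqueue is only woken once more than
 * a quarter of the ring's entries are free again, which avoids bouncing the
 * queue on every reclaimed descriptor.  The helper name is hypothetical.
 */
static inline bool ql_example_should_wake_tx(int free_entries, int wq_len)
{
        return free_entries > wq_len / 4;
}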
2251
2252static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2253{
2254        struct ql_adapter *qdev = rx_ring->qdev;
2255        u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2256        struct ql_net_rsp_iocb *net_rsp;
2257        int count = 0;
2258
2259        /* While there are entries in the completion queue. */
2260        while (prod != rx_ring->cnsmr_idx) {
2261
2262                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2263                             "cq_id = %d, prod = %d, cnsmr = %d.\n",
2264                             rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2265
2266                net_rsp = rx_ring->curr_entry;
2267                rmb();
2268                switch (net_rsp->opcode) {
2269                case OPCODE_IB_MAC_IOCB:
2270                        ql_process_mac_rx_intr(qdev, rx_ring,
2271                                               (struct ib_mac_iocb_rsp *)
2272                                               net_rsp);
2273                        break;
2274
2275                case OPCODE_IB_AE_IOCB:
2276                        ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2277                                                net_rsp);
2278                        break;
2279                default:
2280                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2281                                     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2282                                     net_rsp->opcode);
2283                        break;
2284                }
2285                count++;
2286                ql_update_cq(rx_ring);
2287                prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2288                if (count == budget)
2289                        break;
2290        }
2291        ql_update_buffer_queues(qdev, rx_ring);
2292        ql_write_cq_idx(rx_ring);
2293        return count;
2294}
2295
2296static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2297{
2298        struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2299        struct ql_adapter *qdev = rx_ring->qdev;
2300        struct rx_ring *trx_ring;
2301        int i, work_done = 0;
2302        struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2303
2304        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2305                     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2306
2307        /* Service the TX rings first.  They start
2308         * right after the RSS rings. */
2309        for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2310                trx_ring = &qdev->rx_ring[i];
2311                /* If this TX completion ring belongs to this vector and
2312                 * it's not empty then service it.
2313                 */
2314                if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2315                        (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2316                                        trx_ring->cnsmr_idx)) {
2317                        netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2318                                     "%s: Servicing TX completion ring %d.\n",
2319                                     __func__, trx_ring->cq_id);
2320                        ql_clean_outbound_rx_ring(trx_ring);
2321                }
2322        }
2323
2324        /*
2325         * Now service the RSS ring if it's active.
2326         */
2327        if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2328                                        rx_ring->cnsmr_idx) {
2329                netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2330                             "%s: Servicing RX completion ring %d.\n",
2331                             __func__, rx_ring->cq_id);
2332                work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2333        }
2334
2335        if (work_done < budget) {
2336                napi_complete_done(napi, work_done);
2337                ql_enable_completion_interrupt(qdev, rx_ring->irq);
2338        }
2339        return work_done;
2340}
2341
2342static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2343{
2344        struct ql_adapter *qdev = netdev_priv(ndev);
2345
2346        if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2347                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2348                                 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2349        } else {
2350                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2351        }
2352}
2353
2354/**
2355 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2356 * based on the features to enable/disable hardware vlan accel
2357 */
2358static int qlge_update_hw_vlan_features(struct net_device *ndev,
2359                                        netdev_features_t features)
2360{
2361        struct ql_adapter *qdev = netdev_priv(ndev);
2362        int status = 0;
2363        bool need_restart = netif_running(ndev);
2364
2365        if (need_restart) {
2366                status = ql_adapter_down(qdev);
2367                if (status) {
2368                        netif_err(qdev, link, qdev->ndev,
2369                                  "Failed to bring down the adapter\n");
2370                        return status;
2371                }
2372        }
2373
2374        /* update the features with the recent change */
2375        ndev->features = features;
2376
2377        if (need_restart) {
2378                status = ql_adapter_up(qdev);
2379                if (status) {
2380                        netif_err(qdev, link, qdev->ndev,
2381                                  "Failed to bring up the adapter\n");
2382                        return status;
2383                }
2384        }
2385
2386        return status;
2387}
2388
2389static netdev_features_t qlge_fix_features(struct net_device *ndev,
2390        netdev_features_t features)
2391{
2392        int err;
2393
2394        /* Update the behavior of vlan accel in the adapter */
2395        err = qlge_update_hw_vlan_features(ndev, features);
2396        if (err)
2397                return err;
2398
2399        return features;
2400}
2401
2402static int qlge_set_features(struct net_device *ndev,
2403        netdev_features_t features)
2404{
2405        netdev_features_t changed = ndev->features ^ features;
2406
2407        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2408                qlge_vlan_mode(ndev, features);
2409
2410        return 0;
2411}
2412
2413static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2414{
2415        u32 enable_bit = MAC_ADDR_E;
2416        int err;
2417
2418        err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2419                                  MAC_ADDR_TYPE_VLAN, vid);
2420        if (err)
2421                netif_err(qdev, ifup, qdev->ndev,
2422                          "Failed to init vlan address.\n");
2423        return err;
2424}
2425
2426static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2427{
2428        struct ql_adapter *qdev = netdev_priv(ndev);
2429        int status;
2430        int err;
2431
2432        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2433        if (status)
2434                return status;
2435
2436        err = __qlge_vlan_rx_add_vid(qdev, vid);
2437        set_bit(vid, qdev->active_vlans);
2438
2439        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2440
2441        return err;
2442}
2443
2444static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2445{
2446        u32 enable_bit = 0;
2447        int err;
2448
2449        err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2450                                  MAC_ADDR_TYPE_VLAN, vid);
2451        if (err)
2452                netif_err(qdev, ifup, qdev->ndev,
2453                          "Failed to clear vlan address.\n");
2454        return err;
2455}
2456
2457static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2458{
2459        struct ql_adapter *qdev = netdev_priv(ndev);
2460        int status;
2461        int err;
2462
2463        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2464        if (status)
2465                return status;
2466
2467        err = __qlge_vlan_rx_kill_vid(qdev, vid);
2468        clear_bit(vid, qdev->active_vlans);
2469
2470        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2471
2472        return err;
2473}
2474
2475static void qlge_restore_vlan(struct ql_adapter *qdev)
2476{
2477        int status;
2478        u16 vid;
2479
2480        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2481        if (status)
2482                return;
2483
2484        for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2485                __qlge_vlan_rx_add_vid(qdev, vid);
2486
2487        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2488}
2489
2490/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2491static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2492{
2493        struct rx_ring *rx_ring = dev_id;
2494        napi_schedule(&rx_ring->napi);
2495        return IRQ_HANDLED;
2496}
2497
2498/* This handles a fatal error, MPI activity, and the default
2499 * rx_ring in an MSI-X multiple vector environment.
2500 * In an MSI/Legacy environment it also processes the rest
2501 * of the rx_rings.
2502 */
2503static irqreturn_t qlge_isr(int irq, void *dev_id)
2504{
2505        struct rx_ring *rx_ring = dev_id;
2506        struct ql_adapter *qdev = rx_ring->qdev;
2507        struct intr_context *intr_context = &qdev->intr_context[0];
2508        u32 var;
2509        int work_done = 0;
2510
2511        spin_lock(&qdev->hw_lock);
2512        if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2513                netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2514                             "Shared Interrupt, Not ours!\n");
2515                spin_unlock(&qdev->hw_lock);
2516                return IRQ_NONE;
2517        }
2518        spin_unlock(&qdev->hw_lock);
2519
2520        var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2521
2522        /*
2523         * Check for fatal error.
2524         */
2525        if (var & STS_FE) {
2526                ql_queue_asic_error(qdev);
2527                netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2528                var = ql_read32(qdev, ERR_STS);
2529                netdev_err(qdev->ndev, "Resetting chip. "
2530                                        "Error Status Register = 0x%x\n", var);
2531                return IRQ_HANDLED;
2532        }
2533
2534        /*
2535         * Check MPI processor activity.
2536         */
2537        if ((var & STS_PI) &&
2538                (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2539                /*
2540                 * We've got an async event or mailbox completion.
2541                 * Handle it and clear the source of the interrupt.
2542                 */
2543                netif_err(qdev, intr, qdev->ndev,
2544                          "Got MPI processor interrupt.\n");
2545                ql_disable_completion_interrupt(qdev, intr_context->intr);
2546                ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2547                queue_delayed_work_on(smp_processor_id(),
2548                                qdev->workqueue, &qdev->mpi_work, 0);
2549                work_done++;
2550        }
2551
2552        /*
2553         * Get the bit-mask that shows the active queues for this
2554         * pass.  Compare it to the queues that this irq services
2555         * and call napi if there's a match.
2556         */
2557        var = ql_read32(qdev, ISR1);
2558        if (var & intr_context->irq_mask) {
2559                netif_info(qdev, intr, qdev->ndev,
2560                           "Waking handler for rx_ring[0].\n");
2561                ql_disable_completion_interrupt(qdev, intr_context->intr);
2562                napi_schedule(&rx_ring->napi);
2563                work_done++;
2564        }
2565        ql_enable_completion_interrupt(qdev, intr_context->intr);
2566        return work_done ? IRQ_HANDLED : IRQ_NONE;
2567}
2568
2569static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2570{
2571
2572        if (skb_is_gso(skb)) {
2573                int err;
2574                __be16 l3_proto = vlan_get_protocol(skb);
2575
2576                err = skb_cow_head(skb, 0);
2577                if (err < 0)
2578                        return err;
2579
2580                mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2581                mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2582                mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2583                mac_iocb_ptr->total_hdrs_len =
2584                    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2585                mac_iocb_ptr->net_trans_offset =
2586                    cpu_to_le16(skb_network_offset(skb) |
2587                                skb_transport_offset(skb)
2588                                << OB_MAC_TRANSPORT_HDR_SHIFT);
2589                mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2590                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2591                if (likely(l3_proto == htons(ETH_P_IP))) {
2592                        struct iphdr *iph = ip_hdr(skb);
2593                        iph->check = 0;
2594                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2595                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2596                                                                 iph->daddr, 0,
2597                                                                 IPPROTO_TCP,
2598                                                                 0);
2599                } else if (l3_proto == htons(ETH_P_IPV6)) {
2600                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2601                        tcp_hdr(skb)->check =
2602                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2603                                             &ipv6_hdr(skb)->daddr,
2604                                             0, IPPROTO_TCP, 0);
2605                }
2606                return 1;
2607        }
2608        return 0;
2609}
2610
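    /* Set up checksum offload for a non-TSO IPv4 TCP or UDP frame.  The
     * chip inserts the L4 checksum, which is seeded here with the
     * pseudo-header checksum built from the IP header.
     */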
2611static void ql_hw_csum_setup(struct sk_buff *skb,
2612                             struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2613{
2614        int len;
2615        struct iphdr *iph = ip_hdr(skb);
2616        __sum16 *check;
2617        mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2618        mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2619        mac_iocb_ptr->net_trans_offset =
2620                cpu_to_le16(skb_network_offset(skb) |
2621                skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2622
2623        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2624        len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2625        if (likely(iph->protocol == IPPROTO_TCP)) {
2626                check = &(tcp_hdr(skb)->check);
2627                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2628                mac_iocb_ptr->total_hdrs_len =
2629                    cpu_to_le16(skb_transport_offset(skb) +
2630                                (tcp_hdr(skb)->doff << 2));
2631        } else {
2632                check = &(udp_hdr(skb)->check);
2633                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2634                mac_iocb_ptr->total_hdrs_len =
2635                    cpu_to_le16(skb_transport_offset(skb) +
2636                                sizeof(struct udphdr));
2637        }
2638        *check = ~csum_tcpudp_magic(iph->saddr,
2639                                    iph->daddr, len, iph->protocol, 0);
2640}
2641
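    /* Main transmit entry point.  Builds an outbound MAC IOCB for the skb
     * (adding VLAN, TSO or checksum offload as needed), maps it for DMA
     * and rings the doorbell of the selected TX ring.
     */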
2642static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2643{
2644        struct tx_ring_desc *tx_ring_desc;
2645        struct ob_mac_iocb_req *mac_iocb_ptr;
2646        struct ql_adapter *qdev = netdev_priv(ndev);
2647        int tso;
2648        struct tx_ring *tx_ring;
2649        u32 tx_ring_idx = (u32) skb->queue_mapping;
2650
2651        tx_ring = &qdev->tx_ring[tx_ring_idx];
2652
2653        if (skb_padto(skb, ETH_ZLEN))
2654                return NETDEV_TX_OK;
2655
2656        if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2657                netif_info(qdev, tx_queued, qdev->ndev,
2658                           "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2659                           __func__, tx_ring_idx);
2660                netif_stop_subqueue(ndev, tx_ring->wq_id);
2661                tx_ring->tx_errors++;
2662                return NETDEV_TX_BUSY;
2663        }
2664        tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2665        mac_iocb_ptr = tx_ring_desc->queue_entry;
2666        memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2667
2668        mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2669        mac_iocb_ptr->tid = tx_ring_desc->index;
2670        /* We use the upper 32-bits to store the tx queue for this IO.
2671         * When we get the completion we can use it to establish the context.
2672         */
2673        mac_iocb_ptr->txq_idx = tx_ring_idx;
2674        tx_ring_desc->skb = skb;
2675
2676        mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2677
2678        if (skb_vlan_tag_present(skb)) {
2679                netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2680                             "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2681                mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2682                mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2683        }
2684        tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2685        if (tso < 0) {
2686                dev_kfree_skb_any(skb);
2687                return NETDEV_TX_OK;
2688        } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2689                ql_hw_csum_setup(skb,
2690                                 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2691        }
2692        if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2693                        NETDEV_TX_OK) {
2694                netif_err(qdev, tx_queued, qdev->ndev,
2695                          "Could not map the segments.\n");
2696                tx_ring->tx_errors++;
2697                return NETDEV_TX_BUSY;
2698        }
2699        QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2700        tx_ring->prod_idx++;
2701        if (tx_ring->prod_idx == tx_ring->wq_len)
2702                tx_ring->prod_idx = 0;
2703        wmb();
2704
2705        ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2706        netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2707                     "tx queued, slot %d, len %d\n",
2708                     tx_ring->prod_idx, skb->len);
2709
2710        atomic_dec(&tx_ring->tx_count);
2711
2712        if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2713                netif_stop_subqueue(ndev, tx_ring->wq_id);
2714                if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2715                        /*
2716                         * The queue got stopped because the tx_ring was full.
2717                         * Wake it up, because it's now at least 25% empty.
2718                         */
2719                        netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2720        }
2721        return NETDEV_TX_OK;
2722}
2723
2724
2725static void ql_free_shadow_space(struct ql_adapter *qdev)
2726{
2727        if (qdev->rx_ring_shadow_reg_area) {
2728                pci_free_consistent(qdev->pdev,
2729                                    PAGE_SIZE,
2730                                    qdev->rx_ring_shadow_reg_area,
2731                                    qdev->rx_ring_shadow_reg_dma);
2732                qdev->rx_ring_shadow_reg_area = NULL;
2733        }
2734        if (qdev->tx_ring_shadow_reg_area) {
2735                pci_free_consistent(qdev->pdev,
2736                                    PAGE_SIZE,
2737                                    qdev->tx_ring_shadow_reg_area,
2738                                    qdev->tx_ring_shadow_reg_dma);
2739                qdev->tx_ring_shadow_reg_area = NULL;
2740        }
2741}
2742
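    /* Allocate one DMA-coherent page of RX shadow space (completion queue
     * producer indices plus the lbq/sbq indirect page lists) and one page
     * of TX shadow space (work queue consumer indices).
     */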
2743static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2744{
2745        qdev->rx_ring_shadow_reg_area =
2746                pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2747                                      &qdev->rx_ring_shadow_reg_dma);
2748        if (qdev->rx_ring_shadow_reg_area == NULL) {
2749                netif_err(qdev, ifup, qdev->ndev,
2750                          "Allocation of RX shadow space failed.\n");
2751                return -ENOMEM;
2752        }
2753
2754        qdev->tx_ring_shadow_reg_area =
2755                pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2756                                      &qdev->tx_ring_shadow_reg_dma);
2757        if (qdev->tx_ring_shadow_reg_area == NULL) {
2758                netif_err(qdev, ifup, qdev->ndev,
2759                          "Allocation of TX shadow space failed.\n");
2760                goto err_wqp_sh_area;
2761        }
2762        return 0;
2763
2764err_wqp_sh_area:
2765        pci_free_consistent(qdev->pdev,
2766                            PAGE_SIZE,
2767                            qdev->rx_ring_shadow_reg_area,
2768                            qdev->rx_ring_shadow_reg_dma);
2769        return -ENOMEM;
2770}
2771
2772static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2773{
2774        struct tx_ring_desc *tx_ring_desc;
2775        int i;
2776        struct ob_mac_iocb_req *mac_iocb_ptr;
2777
2778        mac_iocb_ptr = tx_ring->wq_base;
2779        tx_ring_desc = tx_ring->q;
2780        for (i = 0; i < tx_ring->wq_len; i++) {
2781                tx_ring_desc->index = i;
2782                tx_ring_desc->skb = NULL;
2783                tx_ring_desc->queue_entry = mac_iocb_ptr;
2784                mac_iocb_ptr++;
2785                tx_ring_desc++;
2786        }
2787        atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2788}
2789
2790static void ql_free_tx_resources(struct ql_adapter *qdev,
2791                                 struct tx_ring *tx_ring)
2792{
2793        if (tx_ring->wq_base) {
2794                pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2795                                    tx_ring->wq_base, tx_ring->wq_base_dma);
2796                tx_ring->wq_base = NULL;
2797        }
2798        kfree(tx_ring->q);
2799        tx_ring->q = NULL;
2800}
2801
2802static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2803                                 struct tx_ring *tx_ring)
2804{
2805        tx_ring->wq_base =
2806            pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2807                                 &tx_ring->wq_base_dma);
2808
2809        if ((tx_ring->wq_base == NULL) ||
2810            tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2811                goto pci_alloc_err;
2812
2813        tx_ring->q =
2814            kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2815        if (tx_ring->q == NULL)
2816                goto err;
2817
2818        return 0;
2819err:
2820        pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2821                            tx_ring->wq_base, tx_ring->wq_base_dma);
2822        tx_ring->wq_base = NULL;
2823pci_alloc_err:
2824        netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2825        return -ENOMEM;
2826}
2827
2828static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2829{
2830        struct bq_desc *lbq_desc;
2831
2832        u32 curr_idx, clean_idx;
2833
2834        curr_idx = rx_ring->lbq_curr_idx;
2835        clean_idx = rx_ring->lbq_clean_idx;
2836        while (curr_idx != clean_idx) {
2837                lbq_desc = &rx_ring->lbq[curr_idx];
2838
2839                if (lbq_desc->p.pg_chunk.last_flag) {
2840                        pci_unmap_page(qdev->pdev,
2841                                lbq_desc->p.pg_chunk.map,
2842                                ql_lbq_block_size(qdev),
2843                                       PCI_DMA_FROMDEVICE);
2844                        lbq_desc->p.pg_chunk.last_flag = 0;
2845                }
2846
2847                put_page(lbq_desc->p.pg_chunk.page);
2848                lbq_desc->p.pg_chunk.page = NULL;
2849
2850                if (++curr_idx == rx_ring->lbq_len)
2851                        curr_idx = 0;
2852
2853        }
2854        if (rx_ring->pg_chunk.page) {
2855                pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2856                        ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2857                put_page(rx_ring->pg_chunk.page);
2858                rx_ring->pg_chunk.page = NULL;
2859        }
2860}
2861
2862static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2863{
2864        int i;
2865        struct bq_desc *sbq_desc;
2866
2867        for (i = 0; i < rx_ring->sbq_len; i++) {
2868                sbq_desc = &rx_ring->sbq[i];
2869                if (sbq_desc == NULL) {
2870                        netif_err(qdev, ifup, qdev->ndev,
2871                                  "sbq_desc %d is NULL.\n", i);
2872                        return;
2873                }
2874                if (sbq_desc->p.skb) {
2875                        pci_unmap_single(qdev->pdev,
2876                                         dma_unmap_addr(sbq_desc, mapaddr),
2877                                         dma_unmap_len(sbq_desc, maplen),
2878                                         PCI_DMA_FROMDEVICE);
2879                        dev_kfree_skb(sbq_desc->p.skb);
2880                        sbq_desc->p.skb = NULL;
2881                }
2882        }
2883}
2884
2885/* Free all large and small rx buffers associated
2886 * with the completion queues for this device.
2887 */
2888static void ql_free_rx_buffers(struct ql_adapter *qdev)
2889{
2890        int i;
2891        struct rx_ring *rx_ring;
2892
2893        for (i = 0; i < qdev->rx_ring_count; i++) {
2894                rx_ring = &qdev->rx_ring[i];
2895                if (rx_ring->lbq)
2896                        ql_free_lbq_buffers(qdev, rx_ring);
2897                if (rx_ring->sbq)
2898                        ql_free_sbq_buffers(qdev, rx_ring);
2899        }
2900}
2901
2902static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2903{
2904        struct rx_ring *rx_ring;
2905        int i;
2906
2907        for (i = 0; i < qdev->rx_ring_count; i++) {
2908                rx_ring = &qdev->rx_ring[i];
2909                if (rx_ring->type != TX_Q)
2910                        ql_update_buffer_queues(qdev, rx_ring);
2911        }
2912}
2913
2914static void ql_init_lbq_ring(struct ql_adapter *qdev,
2915                                struct rx_ring *rx_ring)
2916{
2917        int i;
2918        struct bq_desc *lbq_desc;
2919        __le64 *bq = rx_ring->lbq_base;
2920
2921        memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2922        for (i = 0; i < rx_ring->lbq_len; i++) {
2923                lbq_desc = &rx_ring->lbq[i];
2924                memset(lbq_desc, 0, sizeof(*lbq_desc));
2925                lbq_desc->index = i;
2926                lbq_desc->addr = bq;
2927                bq++;
2928        }
2929}
2930
2931static void ql_init_sbq_ring(struct ql_adapter *qdev,
2932                                struct rx_ring *rx_ring)
2933{
2934        int i;
2935        struct bq_desc *sbq_desc;
2936        __le64 *bq = rx_ring->sbq_base;
2937
2938        memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2939        for (i = 0; i < rx_ring->sbq_len; i++) {
2940                sbq_desc = &rx_ring->sbq[i];
2941                memset(sbq_desc, 0, sizeof(*sbq_desc));
2942                sbq_desc->index = i;
2943                sbq_desc->addr = bq;
2944                bq++;
2945        }
2946}
2947
2948static void ql_free_rx_resources(struct ql_adapter *qdev,
2949                                 struct rx_ring *rx_ring)
2950{
2951        /* Free the small buffer queue. */
2952        if (rx_ring->sbq_base) {
2953                pci_free_consistent(qdev->pdev,
2954                                    rx_ring->sbq_size,
2955                                    rx_ring->sbq_base, rx_ring->sbq_base_dma);
2956                rx_ring->sbq_base = NULL;
2957        }
2958
2959        /* Free the small buffer queue control blocks. */
2960        kfree(rx_ring->sbq);
2961        rx_ring->sbq = NULL;
2962
2963        /* Free the large buffer queue. */
2964        if (rx_ring->lbq_base) {
2965                pci_free_consistent(qdev->pdev,
2966                                    rx_ring->lbq_size,
2967                                    rx_ring->lbq_base, rx_ring->lbq_base_dma);
2968                rx_ring->lbq_base = NULL;
2969        }
2970
2971        /* Free the large buffer queue control blocks. */
2972        kfree(rx_ring->lbq);
2973        rx_ring->lbq = NULL;
2974
2975        /* Free the rx queue. */
2976        if (rx_ring->cq_base) {
2977                pci_free_consistent(qdev->pdev,
2978                                    rx_ring->cq_size,
2979                                    rx_ring->cq_base, rx_ring->cq_base_dma);
2980                rx_ring->cq_base = NULL;
2981        }
2982}
2983
2984/* Allocate queues and buffers for this completion queue based
2985 * on the values in the parameter structure. */
2986static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2987                                 struct rx_ring *rx_ring)
2988{
2989
2990        /*
2991         * Allocate the completion queue for this rx_ring.
2992         */
2993        rx_ring->cq_base =
2994            pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2995                                 &rx_ring->cq_base_dma);
2996
2997        if (rx_ring->cq_base == NULL) {
2998                netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2999                return -ENOMEM;
3000        }
3001
3002        if (rx_ring->sbq_len) {
3003                /*
3004                 * Allocate small buffer queue.
3005                 */
3006                rx_ring->sbq_base =
3007                    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
3008                                         &rx_ring->sbq_base_dma);
3009
3010                if (rx_ring->sbq_base == NULL) {
3011                        netif_err(qdev, ifup, qdev->ndev,
3012                                  "Small buffer queue allocation failed.\n");
3013                        goto err_mem;
3014                }
3015
3016                /*
3017                 * Allocate small buffer queue control blocks.
3018                 */
3019                rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3020                                             sizeof(struct bq_desc),
3021                                             GFP_KERNEL);
3022                if (rx_ring->sbq == NULL)
3023                        goto err_mem;
3024
3025                ql_init_sbq_ring(qdev, rx_ring);
3026        }
3027
3028        if (rx_ring->lbq_len) {
3029                /*
3030                 * Allocate large buffer queue.
3031                 */
3032                rx_ring->lbq_base =
3033                    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3034                                         &rx_ring->lbq_base_dma);
3035
3036                if (rx_ring->lbq_base == NULL) {
3037                        netif_err(qdev, ifup, qdev->ndev,
3038                                  "Large buffer queue allocation failed.\n");
3039                        goto err_mem;
3040                }
3041                /*
3042                 * Allocate large buffer queue control blocks.
3043                 */
3044                rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3045                                             sizeof(struct bq_desc),
3046                                             GFP_KERNEL);
3047                if (rx_ring->lbq == NULL)
3048                        goto err_mem;
3049
3050                ql_init_lbq_ring(qdev, rx_ring);
3051        }
3052
3053        return 0;
3054
3055err_mem:
3056        ql_free_rx_resources(qdev, rx_ring);
3057        return -ENOMEM;
3058}
3059
3060static void ql_tx_ring_clean(struct ql_adapter *qdev)
3061{
3062        struct tx_ring *tx_ring;
3063        struct tx_ring_desc *tx_ring_desc;
3064        int i, j;
3065
3066        /*
3067         * Loop through all queues and free
3068         * any resources.
3069         */
3070        for (j = 0; j < qdev->tx_ring_count; j++) {
3071                tx_ring = &qdev->tx_ring[j];
3072                for (i = 0; i < tx_ring->wq_len; i++) {
3073                        tx_ring_desc = &tx_ring->q[i];
3074                        if (tx_ring_desc && tx_ring_desc->skb) {
3075                                netif_err(qdev, ifdown, qdev->ndev,
3076                                          "Freeing lost SKB %p, from queue %d, index %d.\n",
3077                                          tx_ring_desc->skb, j,
3078                                          tx_ring_desc->index);
3079                                ql_unmap_send(qdev, tx_ring_desc,
3080                                              tx_ring_desc->map_cnt);
3081                                dev_kfree_skb(tx_ring_desc->skb);
3082                                tx_ring_desc->skb = NULL;
3083                        }
3084                }
3085        }
3086}
3087
3088static void ql_free_mem_resources(struct ql_adapter *qdev)
3089{
3090        int i;
3091
3092        for (i = 0; i < qdev->tx_ring_count; i++)
3093                ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3094        for (i = 0; i < qdev->rx_ring_count; i++)
3095                ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3096        ql_free_shadow_space(qdev);
3097}
3098
3099static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3100{
3101        int i;
3102
3103        /* Allocate space for our shadow registers and such. */
3104        if (ql_alloc_shadow_space(qdev))
3105                return -ENOMEM;
3106
3107        for (i = 0; i < qdev->rx_ring_count; i++) {
3108                if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3109                        netif_err(qdev, ifup, qdev->ndev,
3110                                  "RX resource allocation failed.\n");
3111                        goto err_mem;
3112                }
3113        }
3114        /* Allocate tx queue resources */
3115        for (i = 0; i < qdev->tx_ring_count; i++) {
3116                if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3117                        netif_err(qdev, ifup, qdev->ndev,
3118                                  "TX resource allocation failed.\n");
3119                        goto err_mem;
3120                }
3121        }
3122        return 0;
3123
3124err_mem:
3125        ql_free_mem_resources(qdev);
3126        return -ENOMEM;
3127}
3128
3129/* Set up the rx ring control block and pass it to the chip.
3130 * The control block is defined as
3131 * "Completion Queue Initialization Control Block", or cqicb.
3132 */
3133static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3134{
3135        struct cqicb *cqicb = &rx_ring->cqicb;
3136        void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3137                (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3138        u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3139                (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3140        void __iomem *doorbell_area =
3141            qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3142        int err = 0;
3143        u16 bq_len;
3144        u64 tmp;
3145        __le64 *base_indirect_ptr;
3146        int page_entries;
3147
3148        /* Set up the shadow registers for this ring. */
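            /* Layout within this ring's RX_RING_SHADOW_SPACE: the 8-byte
             * producer index shadow, then the lbq indirect page list, then
             * the sbq indirect page list.
             */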
3149        rx_ring->prod_idx_sh_reg = shadow_reg;
3150        rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3151        *rx_ring->prod_idx_sh_reg = 0;
3152        shadow_reg += sizeof(u64);
3153        shadow_reg_dma += sizeof(u64);
3154        rx_ring->lbq_base_indirect = shadow_reg;
3155        rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3156        shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3157        shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3158        rx_ring->sbq_base_indirect = shadow_reg;
3159        rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3160
3161        /* PCI doorbell mem area + 0x00 for consumer index register */
3162        rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3163        rx_ring->cnsmr_idx = 0;
3164        rx_ring->curr_entry = rx_ring->cq_base;
3165
3166        /* PCI doorbell mem area + 0x04 for valid register */
3167        rx_ring->valid_db_reg = doorbell_area + 0x04;
3168
3169        /* PCI doorbell mem area + 0x18 for large buffer consumer */
3170        rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3171
3172        /* PCI doorbell mem area + 0x1c */
3173        rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3174
3175        memset((void *)cqicb, 0, sizeof(struct cqicb));
3176        cqicb->msix_vect = rx_ring->irq;
3177
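            /* Queue length fields in the CQICB are only 16 bits wide, so a
             * full 65536-entry queue is encoded as 0.
             */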
3178        bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3179        cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3180
3181        cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3182
3183        cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3184
3185        /*
3186         * Set up the control block load flags.
3187         */
3188        cqicb->flags = FLAGS_LC |       /* Load queue base address */
3189            FLAGS_LV |          /* Load MSI-X vector */
3190            FLAGS_LI;           /* Load irq delay values */
3191        if (rx_ring->lbq_len) {
3192                cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3193                tmp = (u64)rx_ring->lbq_base_dma;
3194                base_indirect_ptr = rx_ring->lbq_base_indirect;
3195                page_entries = 0;
3196                do {
3197                        *base_indirect_ptr = cpu_to_le64(tmp);
3198                        tmp += DB_PAGE_SIZE;
3199                        base_indirect_ptr++;
3200                        page_entries++;
3201                } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3202                cqicb->lbq_addr =
3203                    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3204                bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3205                        (u16) rx_ring->lbq_buf_size;
3206                cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3207                bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3208                        (u16) rx_ring->lbq_len;
3209                cqicb->lbq_len = cpu_to_le16(bq_len);
3210                rx_ring->lbq_prod_idx = 0;
3211                rx_ring->lbq_curr_idx = 0;
3212                rx_ring->lbq_clean_idx = 0;
3213                rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3214        }
3215        if (rx_ring->sbq_len) {
3216                cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3217                tmp = (u64)rx_ring->sbq_base_dma;
3218                base_indirect_ptr = rx_ring->sbq_base_indirect;
3219                page_entries = 0;
3220                do {
3221                        *base_indirect_ptr = cpu_to_le64(tmp);
3222                        tmp += DB_PAGE_SIZE;
3223                        base_indirect_ptr++;
3224                        page_entries++;
3225                } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3226                cqicb->sbq_addr =
3227                    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3228                cqicb->sbq_buf_size =
3229                    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3230                bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3231                        (u16) rx_ring->sbq_len;
3232                cqicb->sbq_len = cpu_to_le16(bq_len);
3233                rx_ring->sbq_prod_idx = 0;
3234                rx_ring->sbq_curr_idx = 0;
3235                rx_ring->sbq_clean_idx = 0;
3236                rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3237        }
3238        switch (rx_ring->type) {
3239        case TX_Q:
3240                cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3241                cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3242                break;
3243        case RX_Q:
3244                /* Inbound completion handling rx_rings run in
3245                 * separate NAPI contexts.
3246                 */
3247                netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3248                               64);
3249                cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3250                cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3251                break;
3252        default:
3253                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3254                             "Invalid rx_ring->type = %d.\n", rx_ring->type);
3255        }
3256        err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3257                           CFG_LCQ, rx_ring->cq_id);
3258        if (err) {
3259                netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3260                return err;
3261        }
3262        return err;
3263}
3264
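    /* Build the work queue initialization control block (wqicb) for this
     * TX ring, assign its doorbell and shadow registers, and download it
     * to the chip with ql_write_cfg().
     */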
3265static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3266{
3267        struct wqicb *wqicb = (struct wqicb *)tx_ring;
3268        void __iomem *doorbell_area =
3269            qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3270        void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3271            (tx_ring->wq_id * sizeof(u64));
3272        u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3273            (tx_ring->wq_id * sizeof(u64));
3274        int err = 0;
3275
3276        /*
3277         * Assign doorbell registers for this tx_ring.
3278         */
3279        /* TX PCI doorbell mem area for tx producer index */
3280        tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3281        tx_ring->prod_idx = 0;
3282        /* TX PCI doorbell mem area + 0x04 */
3283        tx_ring->valid_db_reg = doorbell_area + 0x04;
3284
3285        /*
3286         * Assign shadow registers for this tx_ring.
3287         */
3288        tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3289        tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3290
3291        wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3292        wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3293                                   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3294        wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3295        wqicb->rid = 0;
3296        wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3297
3298        wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3299
3300        ql_init_tx_ring(qdev, tx_ring);
3301
3302        err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3303                           (u16) tx_ring->wq_id);
3304        if (err) {
3305                netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3306                return err;
3307        }
3308        return err;
3309}
3310
3311static void ql_disable_msix(struct ql_adapter *qdev)
3312{
3313        if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3314                pci_disable_msix(qdev->pdev);
3315                clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3316                kfree(qdev->msi_x_entry);
3317                qdev->msi_x_entry = NULL;
3318        } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3319                pci_disable_msi(qdev->pdev);
3320                clear_bit(QL_MSI_ENABLED, &qdev->flags);
3321        }
3322}
3323
3324/* We start by trying to get the number of vectors
3325 * stored in qdev->intr_count. If we don't get that
3326 * many then we reduce the count and try again.
3327 */
3328static void ql_enable_msix(struct ql_adapter *qdev)
3329{
3330        int i, err;
3331
3332        /* Get the MSIX vectors. */
3333        if (qlge_irq_type == MSIX_IRQ) {
3334                /* Try to alloc space for the msix struct,
3335                 * if it fails then go to MSI/legacy.
3336                 */
3337                qdev->msi_x_entry = kcalloc(qdev->intr_count,
3338                                            sizeof(struct msix_entry),
3339                                            GFP_KERNEL);
3340                if (!qdev->msi_x_entry) {
3341                        qlge_irq_type = MSI_IRQ;
3342                        goto msi;
3343                }
3344
3345                for (i = 0; i < qdev->intr_count; i++)
3346                        qdev->msi_x_entry[i].entry = i;
3347
3348                err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3349                                            1, qdev->intr_count);
3350                if (err < 0) {
3351                        kfree(qdev->msi_x_entry);
3352                        qdev->msi_x_entry = NULL;
3353                        netif_warn(qdev, ifup, qdev->ndev,
3354                                   "MSI-X Enable failed, trying MSI.\n");
3355                        qlge_irq_type = MSI_IRQ;
3356                } else {
3357                        qdev->intr_count = err;
3358                        set_bit(QL_MSIX_ENABLED, &qdev->flags);
3359                        netif_info(qdev, ifup, qdev->ndev,
3360                                   "MSI-X Enabled, got %d vectors.\n",
3361                                   qdev->intr_count);
3362                        return;
3363                }
3364        }
3365msi:
3366        qdev->intr_count = 1;
3367        if (qlge_irq_type == MSI_IRQ) {
3368                if (!pci_enable_msi(qdev->pdev)) {
3369                        set_bit(QL_MSI_ENABLED, &qdev->flags);
3370                        netif_info(qdev, ifup, qdev->ndev,
3371                                   "Running with MSI interrupts.\n");
3372                        return;
3373                }
3374        }
3375        qlge_irq_type = LEG_IRQ;
3376        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3377                     "Running with legacy interrupts.\n");
3378}
3379
3380/* Each vector services 1 RSS ring and 1 or more
3381 * TX completion rings.  This function loops through
3382 * the TX completion rings and assigns the vector that
3383 * will service it.  An example would be if there are
3384 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3385 * This would mean that vector 0 would service RSS ring 0
3386 * and TX completion rings 0,1,2 and 3.  Vector 1 would
3387 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3388 */
3389static void ql_set_tx_vect(struct ql_adapter *qdev)
3390{
3391        int i, j, vect;
3392        u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3393
3394        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3395                /* Assign irq vectors to TX rx_rings.*/
3396                for (vect = 0, j = 0, i = qdev->rss_ring_count;
3397                                         i < qdev->rx_ring_count; i++) {
3398                        if (j == tx_rings_per_vector) {
3399                                vect++;
3400                                j = 0;
3401                        }
3402                        qdev->rx_ring[i].irq = vect;
3403                        j++;
3404                }
3405        } else {
3406                /* For a single vector all rings have an irq
3407                 * of zero.
3408                 */
3409                for (i = 0; i < qdev->rx_ring_count; i++)
3410                        qdev->rx_ring[i].irq = 0;
3411        }
3412}
3413
3414/* Set the interrupt mask for this vector.  Each vector
3415 * will service 1 RSS ring and 1 or more TX completion
3416 * rings.  This function sets up a bit mask per vector
3417 * that indicates which rings it services.
3418 */
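    /* For example (assuming each ring's cq_id equals its index), with 2
     * MSI-X vectors, 2 RSS rings and 8 TX completion rings: vector 0 covers
     * cq_ids 0 and 2-5 (irq_mask 0x3d), vector 1 covers 1 and 6-9 (0x3c2).
     */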
3419static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3420{
3421        int j, vect = ctx->intr;
3422        u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3423
3424        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3425                /* Add the RSS ring serviced by this vector
3426                 * to the mask.
3427                 */
3428                ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3429                /* Add the TX ring(s) serviced by this vector
3430                 * to the mask. */
3431                for (j = 0; j < tx_rings_per_vector; j++) {
3432                        ctx->irq_mask |=
3433                        (1 << qdev->rx_ring[qdev->rss_ring_count +
3434                        (vect * tx_rings_per_vector) + j].cq_id);
3435                }
3436        } else {
3437                /* For a single vector we just set the bit for each
3438                 * queue's ID in the mask.
3439                 */
3440                for (j = 0; j < qdev->rx_ring_count; j++)
3441                        ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3442        }
3443}
3444
3445/*
3446 * Here we build the intr_context structures based on
3447 * our rx_ring count and intr vector count.
3448 * The intr_context structure is used to hook each vector
3449 * to possibly different handlers.
3450 */
3451static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3452{
3453        int i = 0;
3454        struct intr_context *intr_context = &qdev->intr_context[0];
3455
3456        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3457                /* Each rx_ring has its
3458                 * own intr_context since we have separate
3459                 * vectors for each queue.
3460                 */
3461                for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3462                        qdev->rx_ring[i].irq = i;
3463                        intr_context->intr = i;
3464                        intr_context->qdev = qdev;
3465                        /* Set up this vector's bit-mask that indicates
3466                         * which queues it services.
3467                         */
3468                        ql_set_irq_mask(qdev, intr_context);
3469                        /*
3470                         * We set up each vector's enable/disable/read bits so
3471                         * there are no bit/mask calculations in the critical path.
3472                         */
3473                        intr_context->intr_en_mask =
3474                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3475                            INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3476                            | i;
3477                        intr_context->intr_dis_mask =
3478                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3479                            INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3480                            INTR_EN_IHD | i;
3481                        intr_context->intr_read_mask =
3482                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3483                            INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3484                            i;
3485                        if (i == 0) {
3486                                /* The first vector/queue handles
3487                                 * broadcast/multicast, fatal errors,
3488                                 * and firmware events.  This in addition
3489                                 * to normal inbound NAPI processing.
3490                                 */
3491                                intr_context->handler = qlge_isr;
3492                                sprintf(intr_context->name, "%s-rx-%d",
3493                                        qdev->ndev->name, i);
3494                        } else {
3495                                /*
3496                                 * Inbound queues handle unicast frames only.
3497                                 */
3498                                intr_context->handler = qlge_msix_rx_isr;
3499                                sprintf(intr_context->name, "%s-rx-%d",
3500                                        qdev->ndev->name, i);
3501                        }
3502                }
3503        } else {
3504                /*
3505                 * All rx_rings use the same intr_context since
3506                 * there is only one vector.
3507                 */
3508                intr_context->intr = 0;
3509                intr_context->qdev = qdev;
3510                /*
3511                 * We set up each vector's enable/disable/read bits so
3512                 * there are no bit/mask calculations in the critical path.
3513                 */
3514                intr_context->intr_en_mask =
3515                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3516                intr_context->intr_dis_mask =
3517                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3518                    INTR_EN_TYPE_DISABLE;
3519                intr_context->intr_read_mask =
3520                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3521                /*
3522                 * Single interrupt means one handler for all rings.
3523                 */
3524                intr_context->handler = qlge_isr;
3525                sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3526                /* Set up this vector's bit-mask that indicates
3527                 * which queues it services. In this case there is
3528                 * a single vector so it will service all RSS and
3529                 * TX completion rings.
3530                 */
3531                ql_set_irq_mask(qdev, intr_context);
3532        }
3533        /* Tell the TX completion rings which MSIx vector
3534         * they will be using.
3535         */
3536        ql_set_tx_vect(qdev);
3537}
3538
3539static void ql_free_irq(struct ql_adapter *qdev)
3540{
3541        int i;
3542        struct intr_context *intr_context = &qdev->intr_context[0];
3543
3544        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3545                if (intr_context->hooked) {
3546                        if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3547                                free_irq(qdev->msi_x_entry[i].vector,
3548                                         &qdev->rx_ring[i]);
3549                        } else {
3550                                free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3551                        }
3552                }
3553        }
3554        ql_disable_msix(qdev);
3555}
3556
3557static int ql_request_irq(struct ql_adapter *qdev)
3558{
3559        int i;
3560        int status = 0;
3561        struct pci_dev *pdev = qdev->pdev;
3562        struct intr_context *intr_context = &qdev->intr_context[0];
3563
3564        ql_resolve_queues_to_irqs(qdev);
3565
3566        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3567                atomic_set(&intr_context->irq_cnt, 0);
3568                if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3569                        status = request_irq(qdev->msi_x_entry[i].vector,
3570                                             intr_context->handler,
3571                                             0,
3572                                             intr_context->name,
3573                                             &qdev->rx_ring[i]);
3574                        if (status) {
3575                                netif_err(qdev, ifup, qdev->ndev,
3576                                          "Failed request for MSIX interrupt %d.\n",
3577                                          i);
3578                                goto err_irq;
3579                        }
3580                } else {
3581                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3582                                     "trying msi or legacy interrupts.\n");
3583                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3584                                     "%s: irq = %d.\n", __func__, pdev->irq);
3585                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3586                                     "%s: context->name = %s.\n", __func__,
3587                                     intr_context->name);
3588                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3589                                     "%s: dev_id = 0x%p.\n", __func__,
3590                                     &qdev->rx_ring[0]);
3591                        status = request_irq(pdev->irq, qlge_isr,
3592                                             test_bit(QL_MSI_ENABLED,
3593                                                      &qdev->flags) ?
3594                                             0 : IRQF_SHARED,
3595                                             intr_context->name,
3596                                             &qdev->rx_ring[0]);
3597                        if (status)
3598                                goto err_irq;
3599
3600                        netif_err(qdev, ifup, qdev->ndev,
3601                                  "Hooked intr %d, queue type %s, with name %s.\n",
3602                                  i,
3603                                  qdev->rx_ring[0].type == DEFAULT_Q ?
3604                                  "DEFAULT_Q" :
3605                                  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3606                                  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3607                                  intr_context->name);
3608                }
3609                intr_context->hooked = 1;
3610        }
3611        return status;
3612err_irq:
3613        netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3614        ql_free_irq(qdev);
3615        return status;
3616}
3617
3618static int ql_start_rss(struct ql_adapter *qdev)
3619{
3620        static const u8 init_hash_seed[] = {
3621                0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3622                0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3623                0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3624                0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3625                0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3626        };
3627        struct ricb *ricb = &qdev->ricb;
3628        int status = 0;
3629        int i;
3630        u8 *hash_id = (u8 *) ricb->hash_cq_id;
3631
3632        memset((void *)ricb, 0, sizeof(*ricb));
3633
3634        ricb->base_cq = RSS_L4K;
3635        ricb->flags =
3636                (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3637        ricb->mask = cpu_to_le16((u16)(0x3ff));
3638
3639        /*
3640         * Fill out the Indirection Table.
3641         */
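            /* The bitwise AND assumes rss_ring_count is a power of two; e.g.
             * with four RSS rings the 1024 entries repeat 0,1,2,3,0,1,2,3,...
             */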
3642        for (i = 0; i < 1024; i++)
3643                hash_id[i] = (i & (qdev->rss_ring_count - 1));
3644
3645        memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3646        memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3647
3648        status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3649        if (status) {
3650                netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3651                return status;
3652        }
3653        return status;
3654}
3655
3656static int ql_clear_routing_entries(struct ql_adapter *qdev)
3657{
3658        int i, status = 0;
3659
3660        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3661        if (status)
3662                return status;
3663        /* Clear all the entries in the routing table. */
3664        for (i = 0; i < 16; i++) {
3665                status = ql_set_routing_reg(qdev, i, 0, 0);
3666                if (status) {
3667                        netif_err(qdev, ifup, qdev->ndev,
3668                                  "Failed to init routing register for CAM packets.\n");
3669                        break;
3670                }
3671        }
3672        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3673        return status;
3674}
3675
3676/* Initialize the frame-to-queue routing. */
3677static int ql_route_initialize(struct ql_adapter *qdev)
3678{
3679        int status = 0;
3680
3681        /* Clear all the entries in the routing table. */
3682        status = ql_clear_routing_entries(qdev);
3683        if (status)
3684                return status;
3685
3686        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3687        if (status)
3688                return status;
3689
3690        status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3691                                                RT_IDX_IP_CSUM_ERR, 1);
3692        if (status) {
3693                netif_err(qdev, ifup, qdev->ndev,
3694                        "Failed to init routing register "
3695                        "for IP CSUM error packets.\n");
3696                goto exit;
3697        }
3698        status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3699                                                RT_IDX_TU_CSUM_ERR, 1);
3700        if (status) {
3701                netif_err(qdev, ifup, qdev->ndev,
3702                        "Failed to init routing register "
3703                        "for TCP/UDP CSUM error packets.\n");
3704                goto exit;
3705        }
3706        status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3707        if (status) {
3708                netif_err(qdev, ifup, qdev->ndev,
3709                          "Failed to init routing register for broadcast packets.\n");
3710                goto exit;
3711        }
3712        /* If we have more than one inbound queue, then turn on RSS in the
3713         * routing block.
3714         */
3715        if (qdev->rss_ring_count > 1) {
3716                status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3717                                        RT_IDX_RSS_MATCH, 1);
3718                if (status) {
3719                        netif_err(qdev, ifup, qdev->ndev,
3720                                  "Failed to init routing register for MATCH RSS packets.\n");
3721                        goto exit;
3722                }
3723        }
3724
3725        status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3726                                    RT_IDX_CAM_HIT, 1);
3727        if (status)
3728                netif_err(qdev, ifup, qdev->ndev,
3729                          "Failed to init routing register for CAM packets.\n");
3730exit:
3731        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3732        return status;
3733}
3734
3735int ql_cam_route_initialize(struct ql_adapter *qdev)
3736{
3737        int status, set;
3738
3739        /* Check if the link is up and use that to
3740         * determine whether we are setting or clearing
3741         * the MAC address in the CAM.
3742         */
3743        set = ql_read32(qdev, STS);
3744        set &= qdev->port_link_up;
3745        status = ql_set_mac_addr(qdev, set);
3746        if (status) {
3747                netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3748                return status;
3749        }
3750
3751        status = ql_route_initialize(qdev);
3752        if (status)
3753                netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3754
3755        return status;
3756}
3757
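    /* Bring the chip to an operational state: program the global control
     * registers, start every RX and TX ring, load the RSS RICB when there
     * is more than one inbound queue, initialize the port and CAM/routing
     * filters, then enable NAPI on the RSS rings.
     */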
3758static int ql_adapter_initialize(struct ql_adapter *qdev)
3759{
3760        u32 value, mask;
3761        int i;
3762        int status = 0;
3763
3764        /*
3765         * Set up the System register to halt on errors.
3766         */
3767        value = SYS_EFE | SYS_FAE;
3768        mask = value << 16;
3769        ql_write32(qdev, SYS, mask | value);
3770
3771        /* Set the default queue, and VLAN behavior. */
3772        value = NIC_RCV_CFG_DFQ;
3773        mask = NIC_RCV_CFG_DFQ_MASK;
3774        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3775                value |= NIC_RCV_CFG_RV;
3776                mask |= (NIC_RCV_CFG_RV << 16);
3777        }
3778        ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3779
3780        /* Set the MPI interrupt to enabled. */
3781        ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3782
3783        /* Enable the function, set pagesize, enable error checking. */
3784        value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3785            FSC_EC | FSC_VM_PAGE_4K;
3786        value |= SPLT_SETTING;
3787
3788        /* Set/clear header splitting. */
3789        mask = FSC_VM_PAGESIZE_MASK |
3790            FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3791        ql_write32(qdev, FSC, mask | value);
3792
3793        ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3794
3795        /* Set RX packet routing to use the port/PCI function on which the
3796         * packet arrived, in addition to the usual frame routing.
3797         * This is helpful on bonding where both interfaces can have
3798         * the same MAC address.
3799         */
3800        ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3801        /* Reroute all packets to our interface.
3802         * They may have been routed to MPI firmware
3803         * due to WOL.
3804         */
3805        value = ql_read32(qdev, MGMT_RCV_CFG);
3806        value &= ~MGMT_RCV_CFG_RM;
3807        mask = 0xffff0000;
3808
3809        /* Sticky reg needs clearing due to WOL. */
3810        ql_write32(qdev, MGMT_RCV_CFG, mask);
3811        ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3812
3813        /* Default WOL is enabled on Mezz cards */
3814        if (qdev->pdev->subsystem_device == 0x0068 ||
3815                        qdev->pdev->subsystem_device == 0x0180)
3816                qdev->wol = WAKE_MAGIC;
3817
3818        /* Start up the rx queues. */
3819        for (i = 0; i < qdev->rx_ring_count; i++) {
3820                status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3821                if (status) {
3822                        netif_err(qdev, ifup, qdev->ndev,
3823                                  "Failed to start rx ring[%d].\n", i);
3824                        return status;
3825                }
3826        }
3827
3828        /* If there is more than one inbound completion queue
3829         * then download a RICB to configure RSS.
3830         */
3831        if (qdev->rss_ring_count > 1) {
3832                status = ql_start_rss(qdev);
3833                if (status) {
3834                        netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3835                        return status;
3836                }
3837        }
3838
3839        /* Start up the tx queues. */
3840        for (i = 0; i < qdev->tx_ring_count; i++) {
3841                status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3842                if (status) {
3843                        netif_err(qdev, ifup, qdev->ndev,
3844                                  "Failed to start tx ring[%d].\n", i);
3845                        return status;
3846                }
3847        }
3848
3849        /* Initialize the port and set the max framesize. */
3850        status = qdev->nic_ops->port_initialize(qdev);
3851        if (status)
3852                netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3853
3854        /* Set up the MAC address and frame routing filter. */
3855        status = ql_cam_route_initialize(qdev);
3856        if (status) {
3857                netif_err(qdev, ifup, qdev->ndev,
3858                          "Failed to init CAM/Routing tables.\n");
3859                return status;
3860        }
3861
3862        /* Start NAPI for the RSS queues. */
3863        for (i = 0; i < qdev->rss_ring_count; i++)
3864                napi_enable(&qdev->rx_ring[i].napi);
3865
3866        return status;
3867}
3868
3869/* Issue soft reset to chip. */
3870static int ql_adapter_reset(struct ql_adapter *qdev)
3871{
3872        u32 value;
3873        int status = 0;
3874        unsigned long end_jiffies;
3875
3876        /* Clear all the entries in the routing table. */
3877        status = ql_clear_routing_entries(qdev);
3878        if (status) {
3879                netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3880                return status;
3881        }
3882
3883        /* If the recovery bit is set, skip the mailbox command and
3884         * clear the bit; otherwise we are in the normal reset process.
3885         */
3886        if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3887                /* Stop management traffic. */
3888                ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3889
3890                /* Wait for the NIC and MGMNT FIFOs to empty. */
3891                ql_wait_fifo_empty(qdev);
3892        } else
3893                clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3894
3895        ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3896
3897        end_jiffies = jiffies + usecs_to_jiffies(30);
3898        do {
3899                value = ql_read32(qdev, RST_FO);
3900                if ((value & RST_FO_FR) == 0)
3901                        break;
3902                cpu_relax();
3903        } while (time_before(jiffies, end_jiffies));
3904
3905        if (value & RST_FO_FR) {
3906                netif_err(qdev, ifdown, qdev->ndev,
3907                          "ETIMEDOUT!!! errored out of resetting the chip!\n");
3908                status = -ETIMEDOUT;
3909        }
3910
3911        /* Resume management traffic. */
3912        ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3913        return status;
3914}
3915
3916static void ql_display_dev_info(struct net_device *ndev)
3917{
3918        struct ql_adapter *qdev = netdev_priv(ndev);
3919
3920        netif_info(qdev, probe, qdev->ndev,
3921                   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3922                   "XG Roll = %d, XG Rev = %d.\n",
3923                   qdev->func,
3924                   qdev->port,
3925                   qdev->chip_rev_id & 0x0000000f,
3926                   qdev->chip_rev_id >> 4 & 0x0000000f,
3927                   qdev->chip_rev_id >> 8 & 0x0000000f,
3928                   qdev->chip_rev_id >> 12 & 0x0000000f);
3929        netif_info(qdev, probe, qdev->ndev,
3930                   "MAC address %pM\n", ndev->dev_addr);
3931}
3932
3933static int ql_wol(struct ql_adapter *qdev)
3934{
3935        int status = 0;
3936        u32 wol = MB_WOL_DISABLE;
3937
3938        /* The CAM is still intact after a reset, but if we
3939         * are doing WOL, then we may need to program the
3940         * routing regs. We would also need to issue the mailbox
3941         * commands to instruct the MPI what to do per the ethtool
3942         * settings.
3943         */
3944
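        /* Only magic-packet wake is supported; reject any other
         * requested wake mode up front.
         */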
3945        if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3946                        WAKE_MCAST | WAKE_BCAST)) {
3947                netif_err(qdev, ifdown, qdev->ndev,
3948                          "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3949                          qdev->wol);
3950                return -EINVAL;
3951        }
3952
3953        if (qdev->wol & WAKE_MAGIC) {
3954                status = ql_mb_wol_set_magic(qdev, 1);
3955                if (status) {
3956                        netif_err(qdev, ifdown, qdev->ndev,
3957                                  "Failed to set magic packet on %s.\n",
3958                                  qdev->ndev->name);
3959                        return status;
3960                } else
3961                        netif_info(qdev, drv, qdev->ndev,
3962                                   "Enabled magic packet successfully on %s.\n",
3963                                   qdev->ndev->name);
3964
3965                wol |= MB_WOL_MAGIC_PKT;
3966        }
3967
3968        if (qdev->wol) {
3969                wol |= MB_WOL_MODE_ON;
3970                status = ql_mb_wol_mode(qdev, wol);
3971                netif_err(qdev, drv, qdev->ndev,
3972                          "WOL %s (wol code 0x%x) on %s\n",
3973                          (status == 0) ? "Successfully set" : "Failed",
3974                          wol, qdev->ndev->name);
3975        }
3976
3977        return status;
3978}
3979
3980static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3981{
3982
3983        /* Don't kill the reset worker thread if we
3984         * are in the process of recovery.
3985         */
3986        if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3987                cancel_delayed_work_sync(&qdev->asic_reset_work);
3988        cancel_delayed_work_sync(&qdev->mpi_reset_work);
3989        cancel_delayed_work_sync(&qdev->mpi_work);
3990        cancel_delayed_work_sync(&qdev->mpi_idc_work);
3991        cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3992        cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3993}
3994
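/* Bring the adapter down: take the link down, cancel outstanding work,
 * stop NAPI and interrupts, clean the TX rings, soft-reset the chip and
 * free the RX buffers.
 */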
3995static int ql_adapter_down(struct ql_adapter *qdev)
3996{
3997        int i, status = 0;
3998
3999        ql_link_off(qdev);
4000
4001        ql_cancel_all_work_sync(qdev);
4002
4003        for (i = 0; i < qdev->rss_ring_count; i++)
4004                napi_disable(&qdev->rx_ring[i].napi);
4005
4006        clear_bit(QL_ADAPTER_UP, &qdev->flags);
4007
4008        ql_disable_interrupts(qdev);
4009
4010        ql_tx_ring_clean(qdev);
4011
4012        /* Call netif_napi_del() from a common point.
4013         */
4014        for (i = 0; i < qdev->rss_ring_count; i++)
4015                netif_napi_del(&qdev->rx_ring[i].napi);
4016
4017        status = ql_adapter_reset(qdev);
4018        if (status)
4019                netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4020                          qdev->func);
4021        ql_free_rx_buffers(qdev);
4022
4023        return status;
4024}
4025
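/* Bring the adapter up: initialize the hardware, post RX buffers,
 * restore the RX mode and VLAN settings, then enable interrupts and
 * start the TX queues.
 */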
4026static int ql_adapter_up(struct ql_adapter *qdev)
4027{
4028        int err = 0;
4029
4030        err = ql_adapter_initialize(qdev);
4031        if (err) {
4032                netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4033                goto err_init;
4034        }
4035        set_bit(QL_ADAPTER_UP, &qdev->flags);
4036        ql_alloc_rx_buffers(qdev);
4037        /* If the port is initialized and the
4038         * link is up, then turn on the carrier.
4039         */
4040        if ((ql_read32(qdev, STS) & qdev->port_init) &&
4041                        (ql_read32(qdev, STS) & qdev->port_link_up))
4042                ql_link_on(qdev);
4043        /* Restore rx mode. */
4044        clear_bit(QL_ALLMULTI, &qdev->flags);
4045        clear_bit(QL_PROMISCUOUS, &qdev->flags);
4046        qlge_set_multicast_list(qdev->ndev);
4047
4048        /* Restore vlan setting. */
4049        qlge_restore_vlan(qdev);
4050
4051        ql_enable_interrupts(qdev);
4052        ql_enable_all_completion_interrupts(qdev);
4053        netif_tx_start_all_queues(qdev->ndev);
4054
4055        return 0;
4056err_init:
4057        ql_adapter_reset(qdev);
4058        return err;
4059}
4060
4061static void ql_release_adapter_resources(struct ql_adapter *qdev)
4062{
4063        ql_free_mem_resources(qdev);
4064        ql_free_irq(qdev);
4065}
4066
4067static int ql_get_adapter_resources(struct ql_adapter *qdev)
4068{
4069        int status = 0;
4070
4071        if (ql_alloc_mem_resources(qdev)) {
4072                netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4073                return -ENOMEM;
4074        }
4075        status = ql_request_irq(qdev);
4076        return status;
4077}
4078
4079static int qlge_close(struct net_device *ndev)
4080{
4081        struct ql_adapter *qdev = netdev_priv(ndev);
4082
4083        /* If we hit the pci_channel_io_perm_failure
4084         * condition, then we have already
4085         * brought the adapter down.
4086         */
4087        if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4088                netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4089                clear_bit(QL_EEH_FATAL, &qdev->flags);
4090                return 0;
4091        }
4092
4093        /*
4094         * Wait for device to recover from a reset.
4095         * (Rarely happens, but possible.)
4096         */
4097        while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4098                msleep(1);
4099        ql_adapter_down(qdev);
4100        ql_release_adapter_resources(qdev);
4101        return 0;
4102}
4103
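/* Size the ring layout: one inbound (RSS) completion ring per interrupt
 * vector, one TX ring per online CPU (capped at MAX_CPUS), and one
 * outbound completion ring for each TX ring.
 */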
4104static int ql_configure_rings(struct ql_adapter *qdev)
4105{
4106        int i;
4107        struct rx_ring *rx_ring;
4108        struct tx_ring *tx_ring;
4109        int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4110        unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4111                LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4112
4113        qdev->lbq_buf_order = get_order(lbq_buf_len);
4114
4115        /* In a perfect world we have one RSS ring for each CPU
4116         * and each has its own vector.  To do that we ask for
4117         * cpu_cnt vectors.  ql_enable_msix() will adjust the
4118         * vector count to what we actually get.  We then
4119         * allocate an RSS ring for each.
4120         * Essentially, we are doing min(cpu_count, msix_vector_count).
4121         */
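        /* For example, with cpu_cnt = 4 but only 2 MSI-X vectors granted
         * we end up with 2 RSS rings, 4 TX rings and 6 completion rings.
         */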
4122        qdev->intr_count = cpu_cnt;
4123        ql_enable_msix(qdev);
4124        /* Adjust the RSS ring count to the actual vector count. */
4125        qdev->rss_ring_count = qdev->intr_count;
4126        qdev->tx_ring_count = cpu_cnt;
4127        qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4128
4129        for (i = 0; i < qdev->tx_ring_count; i++) {
4130                tx_ring = &qdev->tx_ring[i];
4131                memset((void *)tx_ring, 0, sizeof(*tx_ring));
4132                tx_ring->qdev = qdev;
4133                tx_ring->wq_id = i;
4134                tx_ring->wq_len = qdev->tx_ring_size;
4135                tx_ring->wq_size =
4136                    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4137
4138                /*
4139                 * The completion queue IDs for the tx rings start
4140                 * immediately after the rss rings.
4141                 */
4142                tx_ring->cq_id = qdev->rss_ring_count + i;
4143        }
4144
4145        for (i = 0; i < qdev->rx_ring_count; i++) {
4146                rx_ring = &qdev->rx_ring[i];
4147                memset((void *)rx_ring, 0, sizeof(*rx_ring));
4148                rx_ring->qdev = qdev;
4149                rx_ring->cq_id = i;
4150                rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4151                if (i < qdev->rss_ring_count) {
4152                        /*
4153                         * Inbound (RSS) queues.
4154                         */
4155                        rx_ring->cq_len = qdev->rx_ring_size;
4156                        rx_ring->cq_size =
4157                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4158                        rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4159                        rx_ring->lbq_size =
4160                            rx_ring->lbq_len * sizeof(__le64);
4161                        rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4162                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4163                        rx_ring->sbq_size =
4164                            rx_ring->sbq_len * sizeof(__le64);
4165                        rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4166                        rx_ring->type = RX_Q;
4167                } else {
4168                        /*
4169                         * Outbound queue handles outbound completions only.
4170                         */
4171                        /* outbound cq is same size as tx_ring it services. */
4172                        rx_ring->cq_len = qdev->tx_ring_size;
4173                        rx_ring->cq_size =
4174                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4175                        rx_ring->lbq_len = 0;
4176                        rx_ring->lbq_size = 0;
4177                        rx_ring->lbq_buf_size = 0;
4178                        rx_ring->sbq_len = 0;
4179                        rx_ring->sbq_size = 0;
4180                        rx_ring->sbq_buf_size = 0;
4181                        rx_ring->type = TX_Q;
4182                }
4183        }
4184        return 0;
4185}
4186
4187static int qlge_open(struct net_device *ndev)
4188{
4189        int err = 0;
4190        struct ql_adapter *qdev = netdev_priv(ndev);
4191
4192        err = ql_adapter_reset(qdev);
4193        if (err)
4194                return err;
4195
4196        err = ql_configure_rings(qdev);
4197        if (err)
4198                return err;
4199
4200        err = ql_get_adapter_resources(qdev);
4201        if (err)
4202                goto error_up;
4203
4204        err = ql_adapter_up(qdev);
4205        if (err)
4206                goto error_up;
4207
4208        return err;
4209
4210error_up:
4211        ql_release_adapter_resources(qdev);
4212        return err;
4213}
4214
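/* Resize the large RX buffers for a new MTU by cycling the adapter
 * down and back up with the new lbq_buf_size.
 */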
4215static int ql_change_rx_buffers(struct ql_adapter *qdev)
4216{
4217        struct rx_ring *rx_ring;
4218        int i, status;
4219        u32 lbq_buf_len;
4220
4221        /* Wait for an outstanding reset to complete. */
4222        if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4223                int i = 4;
4224
4225                while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4226                        netif_err(qdev, ifup, qdev->ndev,
4227                                  "Waiting for adapter UP...\n");
4228                        ssleep(1);
4229                }
4230
4231                if (!i) {
4232                        netif_err(qdev, ifup, qdev->ndev,
4233                                  "Timed out waiting for adapter UP\n");
4234                        return -ETIMEDOUT;
4235                }
4236        }
4237
4238        status = ql_adapter_down(qdev);
4239        if (status)
4240                goto error;
4241
4242        /* Get the new rx buffer size. */
4243        lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4244                LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4245        qdev->lbq_buf_order = get_order(lbq_buf_len);
4246
4247        for (i = 0; i < qdev->rss_ring_count; i++) {
4248                rx_ring = &qdev->rx_ring[i];
4249                /* Set the new size. */
4250                rx_ring->lbq_buf_size = lbq_buf_len;
4251        }
4252
4253        status = ql_adapter_up(qdev);
4254        if (status)
4255                goto error;
4256
4257        return status;
4258error:
4259        netif_alert(qdev, ifup, qdev->ndev,
4260                    "Driver up/down cycle failed, closing device.\n");
4261        set_bit(QL_ADAPTER_UP, &qdev->flags);
4262        dev_close(qdev->ndev);
4263        return status;
4264}
4265
4266static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4267{
4268        struct ql_adapter *qdev = netdev_priv(ndev);
4269        int status;
4270
4271        if (ndev->mtu == 1500 && new_mtu == 9000) {
4272                netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4273        } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4274                netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4275        } else
4276                return -EINVAL;
4277
4278        queue_delayed_work(qdev->workqueue,
4279                        &qdev->mpi_port_cfg_work, 3*HZ);
4280
4281        ndev->mtu = new_mtu;
4282
4283        if (!netif_running(qdev->ndev)) {
4284                return 0;
4285        }
4286
4287        status = ql_change_rx_buffers(qdev);
4288        if (status) {
4289                netif_err(qdev, ifup, qdev->ndev,
4290                          "Changing MTU failed.\n");
4291        }
4292
4293        return status;
4294}
4295
4296static struct net_device_stats *qlge_get_stats(struct net_device
4297                                               *ndev)
4298{
4299        struct ql_adapter *qdev = netdev_priv(ndev);
4300        struct rx_ring *rx_ring = &qdev->rx_ring[0];
4301        struct tx_ring *tx_ring = &qdev->tx_ring[0];
4302        unsigned long pkts, mcast, dropped, errors, bytes;
4303        int i;
4304
4305        /* Get RX stats. */
4306        pkts = mcast = dropped = errors = bytes = 0;
4307        for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4308                pkts += rx_ring->rx_packets;
4309                bytes += rx_ring->rx_bytes;
4310                dropped += rx_ring->rx_dropped;
4311                errors += rx_ring->rx_errors;
4312                mcast += rx_ring->rx_multicast;
4313        }
4314        ndev->stats.rx_packets = pkts;
4315        ndev->stats.rx_bytes = bytes;
4316        ndev->stats.rx_dropped = dropped;
4317        ndev->stats.rx_errors = errors;
4318        ndev->stats.multicast = mcast;
4319
4320        /* Get TX stats. */
4321        pkts = errors = bytes = 0;
4322        for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4323                pkts += tx_ring->tx_packets;
4324                bytes += tx_ring->tx_bytes;
4325                errors += tx_ring->tx_errors;
4326        }
4327        ndev->stats.tx_packets = pkts;
4328        ndev->stats.tx_bytes = bytes;
4329        ndev->stats.tx_errors = errors;
4330        return &ndev->stats;
4331}
4332
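/* Program the RX filtering state: the promiscuous and all-multicast
 * routing slots are toggled on transitions, and the multicast list is
 * loaded into the multicast address registers under the MAC_ADDR
 * semaphore.  The whole update runs under the RT_IDX semaphore.
 */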
4333static void qlge_set_multicast_list(struct net_device *ndev)
4334{
4335        struct ql_adapter *qdev = netdev_priv(ndev);
4336        struct netdev_hw_addr *ha;
4337        int i, status;
4338
4339        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4340        if (status)
4341                return;
4342        /*
4343         * Set or clear promiscuous mode if a
4344         * transition is taking place.
4345         */
4346        if (ndev->flags & IFF_PROMISC) {
4347                if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4348                        if (ql_set_routing_reg
4349                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4350                                netif_err(qdev, hw, qdev->ndev,
4351                                          "Failed to set promiscuous mode.\n");
4352                        } else {
4353                                set_bit(QL_PROMISCUOUS, &qdev->flags);
4354                        }
4355                }
4356        } else {
4357                if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4358                        if (ql_set_routing_reg
4359                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4360                                netif_err(qdev, hw, qdev->ndev,
4361                                          "Failed to clear promiscuous mode.\n");
4362                        } else {
4363                                clear_bit(QL_PROMISCUOUS, &qdev->flags);
4364                        }
4365                }
4366        }
4367
4368        /*
4369         * Set or clear all multicast mode if a
4370         * transition is taking place.
4371         */
4372        if ((ndev->flags & IFF_ALLMULTI) ||
4373            (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4374                if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4375                        if (ql_set_routing_reg
4376                            (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4377                                netif_err(qdev, hw, qdev->ndev,
4378                                          "Failed to set all-multi mode.\n");
4379                        } else {
4380                                set_bit(QL_ALLMULTI, &qdev->flags);
4381                        }
4382                }
4383        } else {
4384                if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4385                        if (ql_set_routing_reg
4386                            (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4387                                netif_err(qdev, hw, qdev->ndev,
4388                                          "Failed to clear all-multi mode.\n");
4389                        } else {
4390                                clear_bit(QL_ALLMULTI, &qdev->flags);
4391                        }
4392                }
4393        }
4394
4395        if (!netdev_mc_empty(ndev)) {
4396                status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4397                if (status)
4398                        goto exit;
4399                i = 0;
4400                netdev_for_each_mc_addr(ha, ndev) {
4401                        if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4402                                                MAC_ADDR_TYPE_MULTI_MAC, i)) {
4403                                netif_err(qdev, hw, qdev->ndev,
4404                                          "Failed to load multicast address.\n");
4405                                ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4406                                goto exit;
4407                        }
4408                        i++;
4409                }
4410                ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4411                if (ql_set_routing_reg
4412                    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4413                        netif_err(qdev, hw, qdev->ndev,
4414                                  "Failed to set multicast match mode.\n");
4415                } else {
4416                        set_bit(QL_ALLMULTI, &qdev->flags);
4417                }
4418        }
4419exit:
4420        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4421}
4422
4423static int qlge_set_mac_address(struct net_device *ndev, void *p)
4424{
4425        struct ql_adapter *qdev = netdev_priv(ndev);
4426        struct sockaddr *addr = p;
4427        int status;
4428
4429        if (!is_valid_ether_addr(addr->sa_data))
4430                return -EADDRNOTAVAIL;
4431        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4432        /* Update local copy of current mac address. */
4433        memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4434
4435        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4436        if (status)
4437                return status;
4438        status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4439                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4440        if (status)
4441                netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4442        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4443        return status;
4444}
4445
4446static void qlge_tx_timeout(struct net_device *ndev)
4447{
4448        struct ql_adapter *qdev = netdev_priv(ndev);
4449        ql_queue_asic_error(qdev);
4450}
4451
4452static void ql_asic_reset_work(struct work_struct *work)
4453{
4454        struct ql_adapter *qdev =
4455            container_of(work, struct ql_adapter, asic_reset_work.work);
4456        int status;
4457        rtnl_lock();
4458        status = ql_adapter_down(qdev);
4459        if (status)
4460                goto error;
4461
4462        status = ql_adapter_up(qdev);
4463        if (status)
4464                goto error;
4465
4466        /* Restore rx mode. */
4467        clear_bit(QL_ALLMULTI, &qdev->flags);
4468        clear_bit(QL_PROMISCUOUS, &qdev->flags);
4469        qlge_set_multicast_list(qdev->ndev);
4470
4471        rtnl_unlock();
4472        return;
4473error:
4474        netif_alert(qdev, ifup, qdev->ndev,
4475                    "Driver up/down cycle failed, closing device\n");
4476
4477        set_bit(QL_ADAPTER_UP, &qdev->flags);
4478        dev_close(qdev->ndev);
4479        rtnl_unlock();
4480}
4481
4482static const struct nic_operations qla8012_nic_ops = {
4483        .get_flash              = ql_get_8012_flash_params,
4484        .port_initialize        = ql_8012_port_initialize,
4485};
4486
4487static const struct nic_operations qla8000_nic_ops = {
4488        .get_flash              = ql_get_8000_flash_params,
4489        .port_initialize        = ql_8000_port_initialize,
4490};
4491
4492/* Find the pcie function number for the other NIC
4493 * on this chip.  Since both NIC functions share a
4494 * common firmware we have the lowest enabled function
4495 * do any common work.  Examples would be resetting
4496 * after a fatal firmware error, or doing a firmware
4497 * coredump.
4498 */
4499static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4500{
4501        int status = 0;
4502        u32 temp;
4503        u32 nic_func1, nic_func2;
4504
4505        status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4506                        &temp);
4507        if (status)
4508                return status;
4509
4510        nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4511                        MPI_TEST_NIC_FUNC_MASK);
4512        nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4513                        MPI_TEST_NIC_FUNC_MASK);
4514
4515        if (qdev->func == nic_func1)
4516                qdev->alt_func = nic_func2;
4517        else if (qdev->func == nic_func2)
4518                qdev->alt_func = nic_func1;
4519        else
4520                status = -EIO;
4521
4522        return status;
4523}
4524
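/* Work out which PCI function and port this instance is, pick the
 * matching semaphore mask, status bits and mailbox registers, and
 * select the chip-specific nic_ops by device ID.
 */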
4525static int ql_get_board_info(struct ql_adapter *qdev)
4526{
4527        int status;
4528        qdev->func =
4529            (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4530        if (qdev->func > 3)
4531                return -EIO;
4532
4533        status = ql_get_alt_pcie_func(qdev);
4534        if (status)
4535                return status;
4536
4537        qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4538        if (qdev->port) {
4539                qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4540                qdev->port_link_up = STS_PL1;
4541                qdev->port_init = STS_PI1;
4542                qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4543                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4544        } else {
4545                qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4546                qdev->port_link_up = STS_PL0;
4547                qdev->port_init = STS_PI0;
4548                qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4549                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4550        }
4551        qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4552        qdev->device_id = qdev->pdev->device;
4553        if (qdev->device_id == QLGE_DEVICE_ID_8012)
4554                qdev->nic_ops = &qla8012_nic_ops;
4555        else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4556                qdev->nic_ops = &qla8000_nic_ops;
4557        return status;
4558}
4559
4560static void ql_release_all(struct pci_dev *pdev)
4561{
4562        struct net_device *ndev = pci_get_drvdata(pdev);
4563        struct ql_adapter *qdev = netdev_priv(ndev);
4564
4565        if (qdev->workqueue) {
4566                destroy_workqueue(qdev->workqueue);
4567                qdev->workqueue = NULL;
4568        }
4569
4570        if (qdev->reg_base)
4571                iounmap(qdev->reg_base);
4572        if (qdev->doorbell_area)
4573                iounmap(qdev->doorbell_area);
4574        vfree(qdev->mpi_coredump);
4575        pci_release_regions(pdev);
4576}
4577
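/* One-time PCI and software setup: enable the device, map the control
 * register and doorbell BARs, choose a DMA mask, read the flash, and
 * initialize the work items, locks and default ring/coalescing values.
 */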
4578static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4579                          int cards_found)
4580{
4581        struct ql_adapter *qdev = netdev_priv(ndev);
4582        int err = 0;
4583
4584        memset((void *)qdev, 0, sizeof(*qdev));
4585        err = pci_enable_device(pdev);
4586        if (err) {
4587                dev_err(&pdev->dev, "PCI device enable failed.\n");
4588                return err;
4589        }
4590
4591        qdev->ndev = ndev;
4592        qdev->pdev = pdev;
4593        pci_set_drvdata(pdev, ndev);
4594
4595        /* Set PCIe read request size */
4596        err = pcie_set_readrq(pdev, 4096);
4597        if (err) {
4598                dev_err(&pdev->dev, "Set readrq failed.\n");
4599                goto err_out1;
4600        }
4601
4602        err = pci_request_regions(pdev, DRV_NAME);
4603        if (err) {
4604                dev_err(&pdev->dev, "PCI region request failed.\n");
4605                goto err_out1;
4606        }
4607
4608        pci_set_master(pdev);
4609        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4610                set_bit(QL_DMA64, &qdev->flags);
4611                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4612        } else {
4613                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4614                if (!err)
4615                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4616        }
4617
4618        if (err) {
4619                dev_err(&pdev->dev, "No usable DMA configuration.\n");
4620                goto err_out2;
4621        }
4622
4623        /* Set PCIe reset type for EEH to fundamental. */
4624        pdev->needs_freset = 1;
4625        pci_save_state(pdev);
4626        qdev->reg_base =
4627            ioremap_nocache(pci_resource_start(pdev, 1),
4628                            pci_resource_len(pdev, 1));
4629        if (!qdev->reg_base) {
4630                dev_err(&pdev->dev, "Register mapping failed.\n");
4631                err = -ENOMEM;
4632                goto err_out2;
4633        }
4634
4635        qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4636        qdev->doorbell_area =
4637            ioremap_nocache(pci_resource_start(pdev, 3),
4638                            pci_resource_len(pdev, 3));
4639        if (!qdev->doorbell_area) {
4640                dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4641                err = -ENOMEM;
4642                goto err_out2;
4643        }
4644
4645        err = ql_get_board_info(qdev);
4646        if (err) {
4647                dev_err(&pdev->dev, "Register access failed.\n");
4648                err = -EIO;
4649                goto err_out2;
4650        }
4651        qdev->msg_enable = netif_msg_init(debug, default_msg);
4652        spin_lock_init(&qdev->hw_lock);
4653        spin_lock_init(&qdev->stats_lock);
4654
4655        if (qlge_mpi_coredump) {
4656                qdev->mpi_coredump =
4657                        vmalloc(sizeof(struct ql_mpi_coredump));
4658                if (qdev->mpi_coredump == NULL) {
4659                        err = -ENOMEM;
4660                        goto err_out2;
4661                }
4662                if (qlge_force_coredump)
4663                        set_bit(QL_FRC_COREDUMP, &qdev->flags);
4664        }
4665        /* make sure the EEPROM is good */
4666        err = qdev->nic_ops->get_flash(qdev);
4667        if (err) {
4668                dev_err(&pdev->dev, "Invalid FLASH.\n");
4669                goto err_out2;
4670        }
4671
4672        /* Keep local copy of current mac address. */
4673        memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4674
4675        /* Set up the default ring sizes. */
4676        qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4677        qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4678
4679        /* Set up the coalescing parameters. */
4680        qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4681        qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4682        qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4683        qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4684
4685        /*
4686         * Set up the operating parameters.
4687         */
4688        qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4689                                                  ndev->name);
4690        INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4691        INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4692        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4693        INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4694        INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4695        INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4696        init_completion(&qdev->ide_completion);
4697        mutex_init(&qdev->mpi_mutex);
4698
4699        if (!cards_found) {
4700                dev_info(&pdev->dev, "%s\n", DRV_STRING);
4701                dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4702                         DRV_NAME, DRV_VERSION);
4703        }
4704        return 0;
4705err_out2:
4706        ql_release_all(pdev);
4707err_out1:
4708        pci_disable_device(pdev);
4709        return err;
4710}
4711
4712static const struct net_device_ops qlge_netdev_ops = {
4713        .ndo_open               = qlge_open,
4714        .ndo_stop               = qlge_close,
4715        .ndo_start_xmit         = qlge_send,
4716        .ndo_change_mtu         = qlge_change_mtu,
4717        .ndo_get_stats          = qlge_get_stats,
4718        .ndo_set_rx_mode        = qlge_set_multicast_list,
4719        .ndo_set_mac_address    = qlge_set_mac_address,
4720        .ndo_validate_addr      = eth_validate_addr,
4721        .ndo_tx_timeout         = qlge_tx_timeout,
4722        .ndo_fix_features       = qlge_fix_features,
4723        .ndo_set_features       = qlge_set_features,
4724        .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4725        .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4726};
4727
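/* Periodic (5 second) status register read so that a dead PCI bus is
 * noticed and EEH recovery can kick in; the timer is not re-armed once
 * the channel is offline.
 */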
4728static void ql_timer(unsigned long data)
4729{
4730        struct ql_adapter *qdev = (struct ql_adapter *)data;
4731        u32 var = 0;
4732
4733        var = ql_read32(qdev, STS);
4734        if (pci_channel_offline(qdev->pdev)) {
4735                netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4736                return;
4737        }
4738
4739        mod_timer(&qdev->timer, jiffies + (5*HZ));
4740}
4741
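/* PCI probe: allocate the multiqueue net_device, do the one-time PCI
 * setup, advertise offload features, register the netdev and start the
 * bus-health timer.
 */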
4742static int qlge_probe(struct pci_dev *pdev,
4743                      const struct pci_device_id *pci_entry)
4744{
4745        struct net_device *ndev = NULL;
4746        struct ql_adapter *qdev = NULL;
4747        static int cards_found;
4748        int err = 0;
4749
4750        ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4751                        min(MAX_CPUS, netif_get_num_default_rss_queues()));
4752        if (!ndev)
4753                return -ENOMEM;
4754
4755        err = ql_init_device(pdev, ndev, cards_found);
4756        if (err < 0) {
4757                free_netdev(ndev);
4758                return err;
4759        }
4760
4761        qdev = netdev_priv(ndev);
4762        SET_NETDEV_DEV(ndev, &pdev->dev);
4763        ndev->hw_features = NETIF_F_SG |
4764                            NETIF_F_IP_CSUM |
4765                            NETIF_F_TSO |
4766                            NETIF_F_TSO_ECN |
4767                            NETIF_F_HW_VLAN_CTAG_TX |
4768                            NETIF_F_HW_VLAN_CTAG_RX |
4769                            NETIF_F_HW_VLAN_CTAG_FILTER |
4770                            NETIF_F_RXCSUM;
4771        ndev->features = ndev->hw_features;
4772        ndev->vlan_features = ndev->hw_features;
4773        /* vlan gets same features (except vlan filter) */
4774        ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4775                                 NETIF_F_HW_VLAN_CTAG_TX |
4776                                 NETIF_F_HW_VLAN_CTAG_RX);
4777
4778        if (test_bit(QL_DMA64, &qdev->flags))
4779                ndev->features |= NETIF_F_HIGHDMA;
4780
4781        /*
4782         * Set up net_device structure.
4783         */
4784        ndev->tx_queue_len = qdev->tx_ring_size;
4785        ndev->irq = pdev->irq;
4786
4787        ndev->netdev_ops = &qlge_netdev_ops;
4788        ndev->ethtool_ops = &qlge_ethtool_ops;
4789        ndev->watchdog_timeo = 10 * HZ;
4790
4791        /* MTU range: this driver only supports 1500 or 9000, so this only
4792         * filters out values above or below, and we'll rely on
4793         * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4794         */
4795        ndev->min_mtu = ETH_DATA_LEN;
4796        ndev->max_mtu = 9000;
4797
4798        err = register_netdev(ndev);
4799        if (err) {
4800                dev_err(&pdev->dev, "net device registration failed.\n");
4801                ql_release_all(pdev);
4802                pci_disable_device(pdev);
4803                free_netdev(ndev);
4804                return err;
4805        }
4806        /* Start up the timer to trigger EEH if
4807         * the bus goes dead
4808         */
4809        init_timer_deferrable(&qdev->timer);
4810        qdev->timer.data = (unsigned long)qdev;
4811        qdev->timer.function = ql_timer;
4812        qdev->timer.expires = jiffies + (5*HZ);
4813        add_timer(&qdev->timer);
4814        ql_link_off(qdev);
4815        ql_display_dev_info(ndev);
4816        atomic_set(&qdev->lb_count, 0);
4817        cards_found++;
4818        return 0;
4819}
4820
4821netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4822{
4823        return qlge_send(skb, ndev);
4824}
4825
4826int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4827{
4828        return ql_clean_inbound_rx_ring(rx_ring, budget);
4829}
4830
4831static void qlge_remove(struct pci_dev *pdev)
4832{
4833        struct net_device *ndev = pci_get_drvdata(pdev);
4834        struct ql_adapter *qdev = netdev_priv(ndev);
4835        del_timer_sync(&qdev->timer);
4836        ql_cancel_all_work_sync(qdev);
4837        unregister_netdev(ndev);
4838        ql_release_all(pdev);
4839        pci_disable_device(pdev);
4840        free_netdev(ndev);
4841}
4842
4843/* Clean up resources without touching hardware. */
4844static void ql_eeh_close(struct net_device *ndev)
4845{
4846        int i;
4847        struct ql_adapter *qdev = netdev_priv(ndev);
4848
4849        if (netif_carrier_ok(ndev)) {
4850                netif_carrier_off(ndev);
4851                netif_stop_queue(ndev);
4852        }
4853
4854        /* Cancel any pending driver work. */
4855        ql_cancel_all_work_sync(qdev);
4856
4857        for (i = 0; i < qdev->rss_ring_count; i++)
4858                netif_napi_del(&qdev->rx_ring[i].napi);
4859
4860        clear_bit(QL_ADAPTER_UP, &qdev->flags);
4861        ql_tx_ring_clean(qdev);
4862        ql_free_rx_buffers(qdev);
4863        ql_release_adapter_resources(qdev);
4864}
4865
4866/*
4867 * This callback is called by the PCI subsystem whenever
4868 * a PCI bus error is detected.
4869 */
4870static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4871                                               enum pci_channel_state state)
4872{
4873        struct net_device *ndev = pci_get_drvdata(pdev);
4874        struct ql_adapter *qdev = netdev_priv(ndev);
4875
4876        switch (state) {
4877        case pci_channel_io_normal:
4878                return PCI_ERS_RESULT_CAN_RECOVER;
4879        case pci_channel_io_frozen:
4880                netif_device_detach(ndev);
4881                del_timer_sync(&qdev->timer);
4882                if (netif_running(ndev))
4883                        ql_eeh_close(ndev);
4884                pci_disable_device(pdev);
4885                return PCI_ERS_RESULT_NEED_RESET;
4886        case pci_channel_io_perm_failure:
4887                dev_err(&pdev->dev,
4888                        "%s: pci_channel_io_perm_failure.\n", __func__);
4889                del_timer_sync(&qdev->timer);
4890                ql_eeh_close(ndev);
4891                set_bit(QL_EEH_FATAL, &qdev->flags);
4892                return PCI_ERS_RESULT_DISCONNECT;
4893        }
4894
4895        /* Request a slot reset. */
4896        return PCI_ERS_RESULT_NEED_RESET;
4897}
4898
4899/*
4900 * This callback is called after the PCI bus has been reset.
4901 * Basically, this tries to restart the card from scratch.
4902 * This is a shortened version of the device probe/discovery code,
4903 * it resembles the first half of the qlge_probe() routine.
4904 */
4905static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4906{
4907        struct net_device *ndev = pci_get_drvdata(pdev);
4908        struct ql_adapter *qdev = netdev_priv(ndev);
4909
4910        pdev->error_state = pci_channel_io_normal;
4911
4912        pci_restore_state(pdev);
4913        if (pci_enable_device(pdev)) {
4914                netif_err(qdev, ifup, qdev->ndev,
4915                          "Cannot re-enable PCI device after reset.\n");
4916                return PCI_ERS_RESULT_DISCONNECT;
4917        }
4918        pci_set_master(pdev);
4919
4920        if (ql_adapter_reset(qdev)) {
4921                netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4922                set_bit(QL_EEH_FATAL, &qdev->flags);
4923                return PCI_ERS_RESULT_DISCONNECT;
4924        }
4925
4926        return PCI_ERS_RESULT_RECOVERED;
4927}
4928
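/* Traffic can flow again after an EEH reset: reopen the interface if it
 * was running, re-arm the bus-health timer and re-attach the device.
 */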
4929static void qlge_io_resume(struct pci_dev *pdev)
4930{
4931        struct net_device *ndev = pci_get_drvdata(pdev);
4932        struct ql_adapter *qdev = netdev_priv(ndev);
4933        int err = 0;
4934
4935        if (netif_running(ndev)) {
4936                err = qlge_open(ndev);
4937                if (err) {
4938                        netif_err(qdev, ifup, qdev->ndev,
4939                                  "Device initialization failed after reset.\n");
4940                        return;
4941                }
4942        } else {
4943                netif_err(qdev, ifup, qdev->ndev,
4944                          "Device was not running prior to EEH.\n");
4945        }
4946        mod_timer(&qdev->timer, jiffies + (5*HZ));
4947        netif_device_attach(ndev);
4948}
4949
4950static const struct pci_error_handlers qlge_err_handler = {
4951        .error_detected = qlge_io_error_detected,
4952        .slot_reset = qlge_io_slot_reset,
4953        .resume = qlge_io_resume,
4954};
4955
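/* Power management: bring the interface down, arm any configured
 * wake-on-LAN mode, save PCI state and drop into the requested power
 * state.  Also used by qlge_shutdown().
 */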
4956static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4957{
4958        struct net_device *ndev = pci_get_drvdata(pdev);
4959        struct ql_adapter *qdev = netdev_priv(ndev);
4960        int err;
4961
4962        netif_device_detach(ndev);
4963        del_timer_sync(&qdev->timer);
4964
4965        if (netif_running(ndev)) {
4966                err = ql_adapter_down(qdev);
4967                if (err)
4968                        return err;
4969        }
4970
4971        ql_wol(qdev);
4972        err = pci_save_state(pdev);
4973        if (err)
4974                return err;
4975
4976        pci_disable_device(pdev);
4977
4978        pci_set_power_state(pdev, pci_choose_state(pdev, state));
4979
4980        return 0;
4981}
4982
4983#ifdef CONFIG_PM
4984static int qlge_resume(struct pci_dev *pdev)
4985{
4986        struct net_device *ndev = pci_get_drvdata(pdev);
4987        struct ql_adapter *qdev = netdev_priv(ndev);
4988        int err;
4989
4990        pci_set_power_state(pdev, PCI_D0);
4991        pci_restore_state(pdev);
4992        err = pci_enable_device(pdev);
4993        if (err) {
4994                netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4995                return err;
4996        }
4997        pci_set_master(pdev);
4998
4999        pci_enable_wake(pdev, PCI_D3hot, 0);
5000        pci_enable_wake(pdev, PCI_D3cold, 0);
5001
5002        if (netif_running(ndev)) {
5003                err = ql_adapter_up(qdev);
5004                if (err)
5005                        return err;
5006        }
5007
5008        mod_timer(&qdev->timer, jiffies + (5*HZ));
5009        netif_device_attach(ndev);
5010
5011        return 0;
5012}
5013#endif /* CONFIG_PM */
5014
5015static void qlge_shutdown(struct pci_dev *pdev)
5016{
5017        qlge_suspend(pdev, PMSG_SUSPEND);
5018}
5019
5020static struct pci_driver qlge_driver = {
5021        .name = DRV_NAME,
5022        .id_table = qlge_pci_tbl,
5023        .probe = qlge_probe,
5024        .remove = qlge_remove,
5025#ifdef CONFIG_PM
5026        .suspend = qlge_suspend,
5027        .resume = qlge_resume,
5028#endif
5029        .shutdown = qlge_shutdown,
5030        .err_handler = &qlge_err_handler
5031};
5032
5033module_pci_driver(qlge_driver);
5034