linux/drivers/net/ethernet/qlogic/qlge/qlge_main.c
   1/*
   2 * QLogic qlge NIC HBA Driver
   3 * Copyright (c)  2003-2008 QLogic Corporation
   4 * See LICENSE.qlge for copyright and licensing details.
   5 * Author:     Linux qlge network device driver by
   6 *                      Ron Mercer <ron.mercer@qlogic.com>
   7 */
   8#include <linux/kernel.h>
   9#include <linux/bitops.h>
  10#include <linux/types.h>
  11#include <linux/module.h>
  12#include <linux/list.h>
  13#include <linux/pci.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/pagemap.h>
  16#include <linux/sched.h>
  17#include <linux/slab.h>
  18#include <linux/dmapool.h>
  19#include <linux/mempool.h>
  20#include <linux/spinlock.h>
  21#include <linux/kthread.h>
  22#include <linux/interrupt.h>
  23#include <linux/errno.h>
  24#include <linux/ioport.h>
  25#include <linux/in.h>
  26#include <linux/ip.h>
  27#include <linux/ipv6.h>
  28#include <net/ipv6.h>
  29#include <linux/tcp.h>
  30#include <linux/udp.h>
  31#include <linux/if_arp.h>
  32#include <linux/if_ether.h>
  33#include <linux/netdevice.h>
  34#include <linux/etherdevice.h>
  35#include <linux/ethtool.h>
  36#include <linux/if_vlan.h>
  37#include <linux/skbuff.h>
  38#include <linux/delay.h>
  39#include <linux/mm.h>
  40#include <linux/vmalloc.h>
  41#include <linux/prefetch.h>
  42#include <net/ip6_checksum.h>
  43
  44#include "qlge.h"
  45
  46char qlge_driver_name[] = DRV_NAME;
  47const char qlge_driver_version[] = DRV_VERSION;
  48
  49MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
  50MODULE_DESCRIPTION(DRV_STRING " ");
  51MODULE_LICENSE("GPL");
  52MODULE_VERSION(DRV_VERSION);
  53
  54static const u32 default_msg =
  55    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
  56/* NETIF_MSG_TIMER |    */
  57    NETIF_MSG_IFDOWN |
  58    NETIF_MSG_IFUP |
  59    NETIF_MSG_RX_ERR |
  60    NETIF_MSG_TX_ERR |
  61/*  NETIF_MSG_TX_QUEUED | */
  62/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
  63/* NETIF_MSG_PKTDATA | */
  64    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
  65
  66static int debug = -1;  /* defaults above */
  67module_param(debug, int, 0664);
  68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  69
  70#define MSIX_IRQ 0
  71#define MSI_IRQ 1
  72#define LEG_IRQ 2
  73static int qlge_irq_type = MSIX_IRQ;
  74module_param(qlge_irq_type, int, 0664);
  75MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
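    /* Usage note (a sketch): the interrupt mode can be chosen at load time
     * with something like "modprobe qlge qlge_irq_type=2" to force legacy
     * interrupts; the default requests MSI-X.
     */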
  76
  77static int qlge_mpi_coredump;
  78module_param(qlge_mpi_coredump, int, 0);
  79MODULE_PARM_DESC(qlge_mpi_coredump,
  80                "Option to enable MPI firmware dump. "
  81                "Default is OFF - Do not allocate memory.");
  82
  83static int qlge_force_coredump;
  84module_param(qlge_force_coredump, int, 0);
  85MODULE_PARM_DESC(qlge_force_coredump,
  86                "Option to allow force of firmware core dump. "
  87                "Default is OFF - Do not allow.");
  88
  89static const struct pci_device_id qlge_pci_tbl[] = {
  90        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
  91        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
  92        /* required last entry */
  93        {0,}
  94};
  95
  96MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
  97
  98static int ql_wol(struct ql_adapter *);
  99static void qlge_set_multicast_list(struct net_device *);
 100static int ql_adapter_down(struct ql_adapter *);
 101static int ql_adapter_up(struct ql_adapter *);
 102
 103/* This hardware semaphore provides exclusive access to
 104 * resources shared between the NIC driver, MPI firmware,
 105 * FCOE firmware and the FC driver.
 106 */
 107static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
 108{
 109        u32 sem_bits = 0;
 110
 111        switch (sem_mask) {
 112        case SEM_XGMAC0_MASK:
 113                sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
 114                break;
 115        case SEM_XGMAC1_MASK:
 116                sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
 117                break;
 118        case SEM_ICB_MASK:
 119                sem_bits = SEM_SET << SEM_ICB_SHIFT;
 120                break;
 121        case SEM_MAC_ADDR_MASK:
 122                sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
 123                break;
 124        case SEM_FLASH_MASK:
 125                sem_bits = SEM_SET << SEM_FLASH_SHIFT;
 126                break;
 127        case SEM_PROBE_MASK:
 128                sem_bits = SEM_SET << SEM_PROBE_SHIFT;
 129                break;
 130        case SEM_RT_IDX_MASK:
 131                sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
 132                break;
 133        case SEM_PROC_REG_MASK:
 134                sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
 135                break;
 136        default:
 137                netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
 138                return -EINVAL;
 139        }
 140
 141        ql_write32(qdev, SEM, sem_bits | sem_mask);
 142        return !(ql_read32(qdev, SEM) & sem_bits);
 143}
 144
 145int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
 146{
 147        unsigned int wait_count = 30;
 148        do {
 149                if (!ql_sem_trylock(qdev, sem_mask))
 150                        return 0;
 151                udelay(100);
 152        } while (--wait_count);
 153        return -ETIMEDOUT;
 154}
 155
 156void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
 157{
 158        ql_write32(qdev, SEM, sem_mask);
 159        ql_read32(qdev, SEM);   /* flush */
 160}
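    /* Typical usage of the semaphore helpers above (a sketch based on the
     * callers later in this file, e.g. the flash readers):
     *
     *    if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
     *            return -ETIMEDOUT;
     *    ...access the shared resource...
     *    ql_sem_unlock(qdev, SEM_FLASH_MASK);
     */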
 161
 162/* This function waits for a specific bit to come ready
 163 * in a given register.  It is used mostly by the initialization
 164 * process, but is also used by kernel thread APIs such as
 165 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 166 */
 167int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
 168{
 169        u32 temp;
 170        int count = UDELAY_COUNT;
 171
 172        while (count) {
 173                temp = ql_read32(qdev, reg);
 174
 175                /* check for errors */
 176                if (temp & err_bit) {
 177                        netif_alert(qdev, probe, qdev->ndev,
 178                                    "register 0x%.08x access error, value = 0x%.08x!\n",
 179                                    reg, temp);
 180                        return -EIO;
 181                } else if (temp & bit)
 182                        return 0;
 183                udelay(UDELAY_DELAY);
 184                count--;
 185        }
 186        netif_alert(qdev, probe, qdev->ndev,
 187                    "Timed out waiting for reg %x to come ready.\n", reg);
 188        return -ETIMEDOUT;
 189}
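    /* Note: the poll budget above is UDELAY_COUNT iterations of
     * UDELAY_DELAY microseconds.  Callers generally wait on a "ready" bit
     * (e.g. MAC_ADDR_MW) before touching the matching indirect data
     * register.
     */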
 190
 191/* The CFG register is used to download TX and RX control blocks
 192 * to the chip. This function waits for an operation to complete.
 193 */
 194static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
 195{
 196        int count = UDELAY_COUNT;
 197        u32 temp;
 198
 199        while (count) {
 200                temp = ql_read32(qdev, CFG);
 201                if (temp & CFG_LE)
 202                        return -EIO;
 203                if (!(temp & bit))
 204                        return 0;
 205                udelay(UDELAY_DELAY);
 206                count--;
 207        }
 208        return -ETIMEDOUT;
 209}
 210
 211
 212/* Used to issue init control blocks to hw. Maps control block,
 213 * sets address, triggers download, waits for completion.
 214 */
 215int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 216                 u16 q_id)
 217{
 218        u64 map;
 219        int status = 0;
 220        int direction;
 221        u32 mask;
 222        u32 value;
 223
 224        direction =
 225            (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
 226            PCI_DMA_FROMDEVICE;
 227
 228        map = pci_map_single(qdev->pdev, ptr, size, direction);
 229        if (pci_dma_mapping_error(qdev->pdev, map)) {
 230                netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
 231                return -ENOMEM;
 232        }
 233
 234        status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
 235        if (status)
 236                return status;
 237
 238        status = ql_wait_cfg(qdev, bit);
 239        if (status) {
 240                netif_err(qdev, ifup, qdev->ndev,
 241                          "Timed out waiting for CFG to come ready.\n");
 242                goto exit;
 243        }
 244
 245        ql_write32(qdev, ICB_L, (u32) map);
 246        ql_write32(qdev, ICB_H, (u32) (map >> 32));
 247
 248        mask = CFG_Q_MASK | (bit << 16);
 249        value = bit | (q_id << CFG_Q_SHIFT);
 250        ql_write32(qdev, CFG, (mask | value));
 251
 252        /*
 253         * Wait for the bit to clear after signaling hw.
 254         */
 255        status = ql_wait_cfg(qdev, bit);
 256exit:
 257        ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
 258        pci_unmap_single(qdev->pdev, map, size, direction);
 259        return status;
 260}
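    /* Callers of ql_write_cfg() pass one of the CFG load bits (CFG_LCQ,
     * CFG_LRQ, CFG_LR, ...) plus a queue id; that bit selects the DMA
     * direction above and is then polled until the chip clears it, which
     * marks the control block download as complete.
     */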
 261
 262/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
 263int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 264                        u32 *value)
 265{
 266        u32 offset = 0;
 267        int status;
 268
 269        switch (type) {
 270        case MAC_ADDR_TYPE_MULTI_MAC:
 271        case MAC_ADDR_TYPE_CAM_MAC:
 272                {
 273                        status =
 274                            ql_wait_reg_rdy(qdev,
 275                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 276                        if (status)
 277                                goto exit;
 278                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 279                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 280                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 281                        status =
 282                            ql_wait_reg_rdy(qdev,
 283                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 284                        if (status)
 285                                goto exit;
 286                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 287                        status =
 288                            ql_wait_reg_rdy(qdev,
 289                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 290                        if (status)
 291                                goto exit;
 292                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 293                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 294                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 295                        status =
 296                            ql_wait_reg_rdy(qdev,
 297                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 298                        if (status)
 299                                goto exit;
 300                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 301                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
 302                                status =
 303                                    ql_wait_reg_rdy(qdev,
 304                                        MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 305                                if (status)
 306                                        goto exit;
 307                                ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 308                                           (index << MAC_ADDR_IDX_SHIFT) | /* index */
 309                                           MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 310                                status =
 311                                    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
 312                                                    MAC_ADDR_MR, 0);
 313                                if (status)
 314                                        goto exit;
 315                                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 316                        }
 317                        break;
 318                }
 319        case MAC_ADDR_TYPE_VLAN:
 320        case MAC_ADDR_TYPE_MULTI_FLTR:
 321        default:
 322                netif_crit(qdev, ifup, qdev->ndev,
 323                           "Address type %d not yet supported.\n", type);
 324                status = -EPERM;
 325        }
 326exit:
 327        return status;
 328}
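    /* Note: a CAM entry read above produces three 32-bit words (lower MAC
     * bits, upper MAC bits and the CAM output word), while a multicast
     * entry produces two, so 'value' must point at enough storage for the
     * requested type.
     */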
 329
 330/* Set up a MAC, multicast or VLAN address for the
 331 * inbound frame matching.
 332 */
 333static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 334                               u16 index)
 335{
 336        u32 offset = 0;
 337        int status = 0;
 338
 339        switch (type) {
 340        case MAC_ADDR_TYPE_MULTI_MAC:
 341                {
 342                        u32 upper = (addr[0] << 8) | addr[1];
 343                        u32 lower = (addr[2] << 24) | (addr[3] << 16) |
 344                                        (addr[4] << 8) | (addr[5]);
 345
 346                        status =
 347                                ql_wait_reg_rdy(qdev,
 348                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 349                        if (status)
 350                                goto exit;
 351                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
 352                                (index << MAC_ADDR_IDX_SHIFT) |
 353                                type | MAC_ADDR_E);
 354                        ql_write32(qdev, MAC_ADDR_DATA, lower);
 355                        status =
 356                                ql_wait_reg_rdy(qdev,
 357                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 358                        if (status)
 359                                goto exit;
 360                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
 361                                (index << MAC_ADDR_IDX_SHIFT) |
 362                                type | MAC_ADDR_E);
 363
 364                        ql_write32(qdev, MAC_ADDR_DATA, upper);
 365                        status =
 366                                ql_wait_reg_rdy(qdev,
 367                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 368                        if (status)
 369                                goto exit;
 370                        break;
 371                }
 372        case MAC_ADDR_TYPE_CAM_MAC:
 373                {
 374                        u32 cam_output;
 375                        u32 upper = (addr[0] << 8) | addr[1];
 376                        u32 lower =
 377                            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
 378                            (addr[5]);
 379                        status =
 380                            ql_wait_reg_rdy(qdev,
 381                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 382                        if (status)
 383                                goto exit;
 384                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 385                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 386                                   type);       /* type */
 387                        ql_write32(qdev, MAC_ADDR_DATA, lower);
 388                        status =
 389                            ql_wait_reg_rdy(qdev,
 390                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 391                        if (status)
 392                                goto exit;
 393                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 394                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 395                                   type);       /* type */
 396                        ql_write32(qdev, MAC_ADDR_DATA, upper);
 397                        status =
 398                            ql_wait_reg_rdy(qdev,
 399                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 400                        if (status)
 401                                goto exit;
 402                        ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
 403                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
 404                                   type);       /* type */
 405                        /* This field should also include the queue id
 406                         * and possibly the function id.  Right now we
 407                         * hardcode the route field to NIC core.
 408                         */
 409                        cam_output = (CAM_OUT_ROUTE_NIC |
 410                                      (qdev->
 411                                       func << CAM_OUT_FUNC_SHIFT) |
 412                                        (0 << CAM_OUT_CQ_ID_SHIFT));
 413                        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
 414                                cam_output |= CAM_OUT_RV;
 415                        /* route to NIC core */
 416                        ql_write32(qdev, MAC_ADDR_DATA, cam_output);
 417                        break;
 418                }
 419        case MAC_ADDR_TYPE_VLAN:
 420                {
 421                        u32 enable_bit = *((u32 *) &addr[0]);
 422                        /* For VLAN, the addr actually holds a bit that
 423                         * either enables or disables the vlan id we are
 424                         * addressing: MAC_ADDR_E (bit 27) is either
 425                         * set (enable) or clear (disable).
 426                         */
 427                        status =
 428                            ql_wait_reg_rdy(qdev,
 429                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 430                        if (status)
 431                                goto exit;
 432                        ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
 433                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
 434                                   type |       /* type */
 435                                   enable_bit); /* enable/disable */
 436                        break;
 437                }
 438        case MAC_ADDR_TYPE_MULTI_FLTR:
 439        default:
 440                netif_crit(qdev, ifup, qdev->ndev,
 441                           "Address type %d not yet supported.\n", type);
 442                status = -EPERM;
 443        }
 444exit:
 445        return status;
 446}
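    /* Typical usage (a sketch, error checking omitted): ql_set_mac_addr()
     * below programs the CAM entry for the station address while holding
     * the MAC address semaphore:
     *
     *    ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
     *    ql_set_mac_addr_reg(qdev, addr, MAC_ADDR_TYPE_CAM_MAC,
     *                        qdev->func * MAX_CQ);
     *    ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
     */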
 447
 448/* Set or clear MAC address in hardware. We sometimes
 449 * have to clear it to prevent wrong frame routing
 450 * especially in a bonding environment.
 451 */
 452static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
 453{
 454        int status;
 455        char zero_mac_addr[ETH_ALEN];
 456        char *addr;
 457
 458        if (set) {
 459                addr = &qdev->current_mac_addr[0];
 460                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 461                             "Set Mac addr %pM\n", addr);
 462        } else {
 463                eth_zero_addr(zero_mac_addr);
 464                addr = &zero_mac_addr[0];
 465                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 466                             "Clearing MAC address\n");
 467        }
 468        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 469        if (status)
 470                return status;
 471        status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
 472                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
 473        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 474        if (status)
 475                netif_err(qdev, ifup, qdev->ndev,
 476                          "Failed to init mac address.\n");
 477        return status;
 478}
 479
 480void ql_link_on(struct ql_adapter *qdev)
 481{
 482        netif_err(qdev, link, qdev->ndev, "Link is up.\n");
 483        netif_carrier_on(qdev->ndev);
 484        ql_set_mac_addr(qdev, 1);
 485}
 486
 487void ql_link_off(struct ql_adapter *qdev)
 488{
 489        netif_err(qdev, link, qdev->ndev, "Link is down.\n");
 490        netif_carrier_off(qdev->ndev);
 491        ql_set_mac_addr(qdev, 0);
 492}
 493
 494/* Get a specific frame routing value from the CAM.
 495 * Used for debug and reg dump.
 496 */
 497int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
 498{
 499        int status = 0;
 500
 501        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 502        if (status)
 503                goto exit;
 504
 505        ql_write32(qdev, RT_IDX,
 506                   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
 507        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
 508        if (status)
 509                goto exit;
 510        *value = ql_read32(qdev, RT_DATA);
 511exit:
 512        return status;
 513}
 514
 515/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 516 * to route different frame types to various inbound queues.  We send broadcast/
 517 * multicast/error frames to the default queue for slow handling,
 518 * and CAM hit/RSS frames to the fast handling queues.
 519 */
 520static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
 521                              int enable)
 522{
 523        int status = -EINVAL; /* Return error if no mask match. */
 524        u32 value = 0;
 525
 526        switch (mask) {
 527        case RT_IDX_CAM_HIT:
 528                {
 529                        value = RT_IDX_DST_CAM_Q |      /* dest */
 530                            RT_IDX_TYPE_NICQ |  /* type */
 531                            (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
 532                        break;
 533                }
 534        case RT_IDX_VALID:      /* Promiscuous Mode frames. */
 535                {
 536                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 537                            RT_IDX_TYPE_NICQ |  /* type */
 538                            (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
 539                        break;
 540                }
 541        case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
 542                {
 543                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 544                            RT_IDX_TYPE_NICQ |  /* type */
 545                            (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
 546                        break;
 547                }
 548        case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
 549                {
 550                        value = RT_IDX_DST_DFLT_Q | /* dest */
 551                                RT_IDX_TYPE_NICQ | /* type */
 552                                (RT_IDX_IP_CSUM_ERR_SLOT <<
 553                                RT_IDX_IDX_SHIFT); /* index */
 554                        break;
 555                }
 556        case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
 557                {
 558                        value = RT_IDX_DST_DFLT_Q | /* dest */
 559                                RT_IDX_TYPE_NICQ | /* type */
 560                                (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
 561                                RT_IDX_IDX_SHIFT); /* index */
 562                        break;
 563                }
 564        case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
 565                {
 566                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 567                            RT_IDX_TYPE_NICQ |  /* type */
 568                            (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
 569                        break;
 570                }
 571        case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
 572                {
 573                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 574                            RT_IDX_TYPE_NICQ |  /* type */
 575                            (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
 576                        break;
 577                }
 578        case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
 579                {
 580                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 581                            RT_IDX_TYPE_NICQ |  /* type */
 582                            (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 583                        break;
 584                }
 585        case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
 586                {
 587                        value = RT_IDX_DST_RSS |        /* dest */
 588                            RT_IDX_TYPE_NICQ |  /* type */
 589                            (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 590                        break;
 591                }
 592        case 0:         /* Clear the E-bit on an entry. */
 593                {
 594                        value = RT_IDX_DST_DFLT_Q |     /* dest */
 595                            RT_IDX_TYPE_NICQ |  /* type */
 596                            (index << RT_IDX_IDX_SHIFT);/* index */
 597                        break;
 598                }
 599        default:
 600                netif_err(qdev, ifup, qdev->ndev,
 601                          "Mask type %d not yet supported.\n", mask);
 602                status = -EPERM;
 603                goto exit;
 604        }
 605
 606        if (value) {
 607                status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 608                if (status)
 609                        goto exit;
 610                value |= (enable ? RT_IDX_E : 0);
 611                ql_write32(qdev, RT_IDX, value);
 612                ql_write32(qdev, RT_DATA, enable ? mask : 0);
 613        }
 614exit:
 615        return status;
 616}
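    /* Example (a sketch): to steer broadcast frames to the default queue a
     * caller would do something like
     *
     *    ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
     *
     * and pass enable == 0 later to clear the E-bit for that slot again.
     */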
 617
 618static void ql_enable_interrupts(struct ql_adapter *qdev)
 619{
 620        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
 621}
 622
 623static void ql_disable_interrupts(struct ql_adapter *qdev)
 624{
 625        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
 626}
 627
 628/* If we're running with multiple MSI-X vectors then we enable on the fly.
 629 * Otherwise, we may have multiple outstanding workers and don't want to
 630 * enable until the last one finishes. In this case, the irq_cnt gets
 631 * incremented every time we queue a worker and decremented every time
 632 * a worker finishes.  Once it hits zero we enable the interrupt.
 633 */
 634u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 635{
 636        u32 var = 0;
 637        unsigned long hw_flags = 0;
 638        struct intr_context *ctx = qdev->intr_context + intr;
 639
 640        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
 641                /* Always enable if we're MSIX multi interrupts and
 642                 * it's not the default (zeroth) interrupt.
 643                 */
 644                ql_write32(qdev, INTR_EN,
 645                           ctx->intr_en_mask);
 646                var = ql_read32(qdev, STS);
 647                return var;
 648        }
 649
 650        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 651        if (atomic_dec_and_test(&ctx->irq_cnt)) {
 652                ql_write32(qdev, INTR_EN,
 653                           ctx->intr_en_mask);
 654                var = ql_read32(qdev, STS);
 655        }
 656        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 657        return var;
 658}
 659
 660static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 661{
 662        u32 var = 0;
 663        struct intr_context *ctx;
 664
 665        /* HW disables for us if we're MSIX multi interrupts and
 666         * it's not the default (zeroth) interrupt.
 667         */
 668        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
 669                return 0;
 670
 671        ctx = qdev->intr_context + intr;
 672        spin_lock(&qdev->hw_lock);
 673        if (!atomic_read(&ctx->irq_cnt)) {
 674                ql_write32(qdev, INTR_EN,
 675                ctx->intr_dis_mask);
 676                var = ql_read32(qdev, STS);
 677        }
 678        atomic_inc(&ctx->irq_cnt);
 679        spin_unlock(&qdev->hw_lock);
 680        return var;
 681}
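    /* The enable/disable routines above are used as a pair when running
     * without MSI-X or on the default vector 0: each disable bumps
     * irq_cnt, and the matching enable re-arms the interrupt only once
     * irq_cnt drops back to zero.
     */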
 682
 683static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 684{
 685        int i;
 686        for (i = 0; i < qdev->intr_count; i++) {
 687                /* The enable call does an atomic_dec_and_test
 688                 * and enables only if the result is zero.
 689                 * So we precharge it here.
 690                 */
 691                if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
 692                        i == 0))
 693                        atomic_set(&qdev->intr_context[i].irq_cnt, 1);
 694                ql_enable_completion_interrupt(qdev, i);
 695        }
 696
 697}
 698
 699static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
 700{
 701        int status, i;
 702        u16 csum = 0;
 703        __le16 *flash = (__le16 *)&qdev->flash;
 704
 705        status = strncmp((char *)&qdev->flash, str, 4);
 706        if (status) {
 707                netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
 708                return  status;
 709        }
 710
 711        for (i = 0; i < size; i++)
 712                csum += le16_to_cpu(*flash++);
 713
 714        if (csum)
 715                netif_err(qdev, ifup, qdev->ndev,
 716                          "Invalid flash checksum, csum = 0x%.04x.\n", csum);
 717
 718        return csum;
 719}
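    /* A valid flash image sums to zero over all of its __le16 words (the
     * image presumably carries a compensating checksum word), so any
     * non-zero csum above is treated as corruption by the callers.
     */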
 720
 721static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
 722{
 723        int status = 0;
 724        /* wait for reg to come ready */
 725        status = ql_wait_reg_rdy(qdev,
 726                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 727        if (status)
 728                goto exit;
 729        /* set up for reg read */
 730        ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
 731        /* wait for reg to come ready */
 732        status = ql_wait_reg_rdy(qdev,
 733                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 734        if (status)
 735                goto exit;
 736        /* This data is stored on flash as an array of
 737         * __le32.  Since ql_read32() returns cpu endian
 738         * we need to swap it back.
 739         */
 740        *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
 741exit:
 742        return status;
 743}
 744
 745static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 746{
 747        u32 i, size;
 748        int status;
 749        __le32 *p = (__le32 *)&qdev->flash;
 750        u32 offset;
 751        u8 mac_addr[6];
 752
 753        /* Get flash offset for function and adjust
 754         * for dword access.
 755         */
 756        if (!qdev->port)
 757                offset = FUNC0_FLASH_OFFSET / sizeof(u32);
 758        else
 759                offset = FUNC1_FLASH_OFFSET / sizeof(u32);
 760
 761        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 762                return -ETIMEDOUT;
 763
 764        size = sizeof(struct flash_params_8000) / sizeof(u32);
 765        for (i = 0; i < size; i++, p++) {
 766                status = ql_read_flash_word(qdev, i+offset, p);
 767                if (status) {
 768                        netif_err(qdev, ifup, qdev->ndev,
 769                                  "Error reading flash.\n");
 770                        goto exit;
 771                }
 772        }
 773
 774        status = ql_validate_flash(qdev,
 775                        sizeof(struct flash_params_8000) / sizeof(u16),
 776                        "8000");
 777        if (status) {
 778                netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 779                status = -EINVAL;
 780                goto exit;
 781        }
 782
 783        /* Extract either manufacturer or BOFM modified
 784         * MAC address.
 785         */
 786        if (qdev->flash.flash_params_8000.data_type1 == 2)
 787                memcpy(mac_addr,
 788                        qdev->flash.flash_params_8000.mac_addr1,
 789                        qdev->ndev->addr_len);
 790        else
 791                memcpy(mac_addr,
 792                        qdev->flash.flash_params_8000.mac_addr,
 793                        qdev->ndev->addr_len);
 794
 795        if (!is_valid_ether_addr(mac_addr)) {
 796                netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
 797                status = -EINVAL;
 798                goto exit;
 799        }
 800
 801        memcpy(qdev->ndev->dev_addr,
 802                mac_addr,
 803                qdev->ndev->addr_len);
 804
 805exit:
 806        ql_sem_unlock(qdev, SEM_FLASH_MASK);
 807        return status;
 808}
 809
 810static int ql_get_8012_flash_params(struct ql_adapter *qdev)
 811{
 812        int i;
 813        int status;
 814        __le32 *p = (__le32 *)&qdev->flash;
 815        u32 offset = 0;
 816        u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
 817
 818        /* Second function's parameters follow the first
 819         * function's.
 820         */
 821        if (qdev->port)
 822                offset = size;
 823
 824        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 825                return -ETIMEDOUT;
 826
 827        for (i = 0; i < size; i++, p++) {
 828                status = ql_read_flash_word(qdev, i+offset, p);
 829                if (status) {
 830                        netif_err(qdev, ifup, qdev->ndev,
 831                                  "Error reading flash.\n");
 832                        goto exit;
 833                }
 834
 835        }
 836
 837        status = ql_validate_flash(qdev,
 838                        sizeof(struct flash_params_8012) / sizeof(u16),
 839                        "8012");
 840        if (status) {
 841                netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 842                status = -EINVAL;
 843                goto exit;
 844        }
 845
 846        if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
 847                status = -EINVAL;
 848                goto exit;
 849        }
 850
 851        memcpy(qdev->ndev->dev_addr,
 852                qdev->flash.flash_params_8012.mac_addr,
 853                qdev->ndev->addr_len);
 854
 855exit:
 856        ql_sem_unlock(qdev, SEM_FLASH_MASK);
 857        return status;
 858}
 859
 860/* xgmac registers are located behind the xgmac_addr and xgmac_data
 861 * register pair.  Each read/write requires us to wait for the ready
 862 * bit before reading/writing the data.
 863 */
 864static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
 865{
 866        int status;
 867        /* wait for reg to come ready */
 868        status = ql_wait_reg_rdy(qdev,
 869                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 870        if (status)
 871                return status;
 872        /* write the data to the data reg */
 873        ql_write32(qdev, XGMAC_DATA, data);
 874        /* trigger the write */
 875        ql_write32(qdev, XGMAC_ADDR, reg);
 876        return status;
 877}
 878
 879/* xgmac registers are located behind the xgmac_addr and xgmac_data
 880 * register pair.  Each read/write requires us to wait for the ready
 881 * bit before reading/writing the data.
 882 */
 883int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
 884{
 885        int status = 0;
 886        /* wait for reg to come ready */
 887        status = ql_wait_reg_rdy(qdev,
 888                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 889        if (status)
 890                goto exit;
 891        /* set up for reg read */
 892        ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
 893        /* wait for reg to come ready */
 894        status = ql_wait_reg_rdy(qdev,
 895                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 896        if (status)
 897                goto exit;
 898        /* get the data */
 899        *data = ql_read32(qdev, XGMAC_DATA);
 900exit:
 901        return status;
 902}
 903
 904/* This is used for reading the 64-bit statistics regs. */
 905int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
 906{
 907        int status = 0;
 908        u32 hi = 0;
 909        u32 lo = 0;
 910
 911        status = ql_read_xgmac_reg(qdev, reg, &lo);
 912        if (status)
 913                goto exit;
 914
 915        status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
 916        if (status)
 917                goto exit;
 918
 919        *data = (u64) lo | ((u64) hi << 32);
 920
 921exit:
 922        return status;
 923}
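    /* Each 64-bit XGMAC statistic spans two consecutive 32-bit registers:
     * the low word at 'reg' and the high word at 'reg + 4', combined above
     * as (hi << 32) | lo.
     */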
 924
 925static int ql_8000_port_initialize(struct ql_adapter *qdev)
 926{
 927        int status;
 928        /*
 929         * Get MPI firmware version for driver banner
 930         * and ethtool info.
 931         */
 932        status = ql_mb_about_fw(qdev);
 933        if (status)
 934                goto exit;
 935        status = ql_mb_get_fw_state(qdev);
 936        if (status)
 937                goto exit;
 938        /* Wake up a worker to get/set the TX/RX frame sizes. */
 939        queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
 940exit:
 941        return status;
 942}
 943
 944/* Take the MAC Core out of reset.
 945 * Enable statistics counting.
 946 * Take the transmitter/receiver out of reset.
 947 * This functionality may be done in the MPI firmware at a
 948 * later date.
 949 */
 950static int ql_8012_port_initialize(struct ql_adapter *qdev)
 951{
 952        int status = 0;
 953        u32 data;
 954
 955        if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
 956                /* Another function has the semaphore, so
 957                 * wait for the port init bit to come ready.
 958                 */
 959                netif_info(qdev, link, qdev->ndev,
 960                           "Another function has the semaphore, so wait for the port init bit to come ready.\n");
 961                status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
 962                if (status) {
 963                        netif_crit(qdev, link, qdev->ndev,
 964                                   "Port initialize timed out.\n");
 965                }
 966                return status;
 967        }
 968
 969        netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
 970        /* Set the core reset. */
 971        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
 972        if (status)
 973                goto end;
 974        data |= GLOBAL_CFG_RESET;
 975        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 976        if (status)
 977                goto end;
 978
 979        /* Clear the core reset and turn on jumbo for receiver. */
 980        data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
 981        data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
 982        data |= GLOBAL_CFG_TX_STAT_EN;
 983        data |= GLOBAL_CFG_RX_STAT_EN;
 984        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 985        if (status)
 986                goto end;
 987
 988        /* Enable the transmitter and clear its reset. */
 989        status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
 990        if (status)
 991                goto end;
 992        data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
 993        data |= TX_CFG_EN;      /* Enable the transmitter. */
 994        status = ql_write_xgmac_reg(qdev, TX_CFG, data);
 995        if (status)
 996                goto end;
 997
 998        /* Enable the receiver and clear its reset. */
 999        status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1000        if (status)
1001                goto end;
1002        data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1003        data |= RX_CFG_EN;      /* Enable the receiver. */
1004        status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1005        if (status)
1006                goto end;
1007
1008        /* Turn on jumbo. */
1009        status =
1010            ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1011        if (status)
1012                goto end;
1013        status =
1014            ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1015        if (status)
1016                goto end;
1017
1018        /* Signal to the world that the port is enabled.        */
1019        ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020end:
1021        ql_sem_unlock(qdev, qdev->xg_sem_mask);
1022        return status;
1023}
1024
1025static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026{
1027        return PAGE_SIZE << qdev->lbq_buf_order;
1028}
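    /* For example, with 4 KB pages and lbq_buf_order == 1 this is an 8 KB
     * block, which ql_get_next_chunk() below carves into lbq_buf_size
     * sized receive chunks.
     */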
1029
1030/* Get the next large buffer. */
1031static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032{
1033        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034        rx_ring->lbq_curr_idx++;
1035        if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036                rx_ring->lbq_curr_idx = 0;
1037        rx_ring->lbq_free_cnt++;
1038        return lbq_desc;
1039}
1040
1041static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042                struct rx_ring *rx_ring)
1043{
1044        struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045
1046        pci_dma_sync_single_for_cpu(qdev->pdev,
1047                                        dma_unmap_addr(lbq_desc, mapaddr),
1048                                    rx_ring->lbq_buf_size,
1049                                        PCI_DMA_FROMDEVICE);
1050
1051        /* If it's the last chunk of our master page then
1052         * we unmap it.
1053         */
1054        if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055                                        == ql_lbq_block_size(qdev))
1056                pci_unmap_page(qdev->pdev,
1057                                lbq_desc->p.pg_chunk.map,
1058                                ql_lbq_block_size(qdev),
1059                                PCI_DMA_FROMDEVICE);
1060        return lbq_desc;
1061}
1062
1063/* Get the next small buffer. */
1064static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065{
1066        struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067        rx_ring->sbq_curr_idx++;
1068        if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069                rx_ring->sbq_curr_idx = 0;
1070        rx_ring->sbq_free_cnt++;
1071        return sbq_desc;
1072}
1073
1074/* Update an rx ring index. */
1075static void ql_update_cq(struct rx_ring *rx_ring)
1076{
1077        rx_ring->cnsmr_idx++;
1078        rx_ring->curr_entry++;
1079        if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080                rx_ring->cnsmr_idx = 0;
1081                rx_ring->curr_entry = rx_ring->cq_base;
1082        }
1083}
1084
1085static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086{
1087        ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1088}
1089
1090static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091                                                struct bq_desc *lbq_desc)
1092{
1093        if (!rx_ring->pg_chunk.page) {
1094                u64 map;
1095                rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
1096                                                qdev->lbq_buf_order);
1097                if (unlikely(!rx_ring->pg_chunk.page)) {
1098                        netif_err(qdev, drv, qdev->ndev,
1099                                  "page allocation failed.\n");
1100                        return -ENOMEM;
1101                }
1102                rx_ring->pg_chunk.offset = 0;
1103                map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104                                        0, ql_lbq_block_size(qdev),
1105                                        PCI_DMA_FROMDEVICE);
1106                if (pci_dma_mapping_error(qdev->pdev, map)) {
1107                        __free_pages(rx_ring->pg_chunk.page,
1108                                        qdev->lbq_buf_order);
1109                        rx_ring->pg_chunk.page = NULL;
1110                        netif_err(qdev, drv, qdev->ndev,
1111                                  "PCI mapping failed.\n");
1112                        return -ENOMEM;
1113                }
1114                rx_ring->pg_chunk.map = map;
1115                rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1116        }
1117
1118        /* Copy the current master pg_chunk info
1119         * to the current descriptor.
1120         */
1121        lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1122
1123        /* Adjust the master page chunk for next
1124         * buffer get.
1125         */
1126        rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1127        if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1128                rx_ring->pg_chunk.page = NULL;
1129                lbq_desc->p.pg_chunk.last_flag = 1;
1130        } else {
1131                rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1132                get_page(rx_ring->pg_chunk.page);
1133                lbq_desc->p.pg_chunk.last_flag = 0;
1134        }
1135        return 0;
1136}
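    /* Every chunk handed out above, except the last one in the page, takes
     * an extra get_page() reference, so the master page is only freed once
     * the stack releases all of its chunks; the final chunk is marked with
     * last_flag and the page is unmapped in ql_get_curr_lchunk().
     */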
1137/* Process (refill) a large buffer queue. */
1138static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1139{
1140        u32 clean_idx = rx_ring->lbq_clean_idx;
1141        u32 start_idx = clean_idx;
1142        struct bq_desc *lbq_desc;
1143        u64 map;
1144        int i;
1145
1146        while (rx_ring->lbq_free_cnt > 32) {
1147                for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1148                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1149                                     "lbq: try cleaning clean_idx = %d.\n",
1150                                     clean_idx);
1151                        lbq_desc = &rx_ring->lbq[clean_idx];
1152                        if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1153                                rx_ring->lbq_clean_idx = clean_idx;
1154                                netif_err(qdev, ifup, qdev->ndev,
1155                                                "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1156                                                i, clean_idx);
1157                                return;
1158                        }
1159
1160                        map = lbq_desc->p.pg_chunk.map +
1161                                lbq_desc->p.pg_chunk.offset;
1162                        dma_unmap_addr_set(lbq_desc, mapaddr, map);
1163                        dma_unmap_len_set(lbq_desc, maplen,
1164                                        rx_ring->lbq_buf_size);
1165                        *lbq_desc->addr = cpu_to_le64(map);
1166
1167                        pci_dma_sync_single_for_device(qdev->pdev, map,
1168                                                rx_ring->lbq_buf_size,
1169                                                PCI_DMA_FROMDEVICE);
1170                        clean_idx++;
1171                        if (clean_idx == rx_ring->lbq_len)
1172                                clean_idx = 0;
1173                }
1174
1175                rx_ring->lbq_clean_idx = clean_idx;
1176                rx_ring->lbq_prod_idx += 16;
1177                if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1178                        rx_ring->lbq_prod_idx = 0;
1179                rx_ring->lbq_free_cnt -= 16;
1180        }
1181
1182        if (start_idx != clean_idx) {
1183                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1184                             "lbq: updating prod idx = %d.\n",
1185                             rx_ring->lbq_prod_idx);
1186                ql_write_db_reg(rx_ring->lbq_prod_idx,
1187                                rx_ring->lbq_prod_idx_db_reg);
1188        }
1189}
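    /* Refill runs in bursts of 16 descriptors and only once more than 32
     * large buffers have been consumed; the producer index doorbell is
     * written a single time after all bursts complete.
     */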
1190
1191/* Process (refill) a small buffer queue. */
1192static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1193{
1194        u32 clean_idx = rx_ring->sbq_clean_idx;
1195        u32 start_idx = clean_idx;
1196        struct bq_desc *sbq_desc;
1197        u64 map;
1198        int i;
1199
1200        while (rx_ring->sbq_free_cnt > 16) {
1201                for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1202                        sbq_desc = &rx_ring->sbq[clean_idx];
1203                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1204                                     "sbq: try cleaning clean_idx = %d.\n",
1205                                     clean_idx);
1206                        if (sbq_desc->p.skb == NULL) {
1207                                netif_printk(qdev, rx_status, KERN_DEBUG,
1208                                             qdev->ndev,
1209                                             "sbq: getting new skb for index %d.\n",
1210                                             sbq_desc->index);
1211                                sbq_desc->p.skb =
1212                                    netdev_alloc_skb(qdev->ndev,
1213                                                     SMALL_BUFFER_SIZE);
1214                                if (sbq_desc->p.skb == NULL) {
1215                                        rx_ring->sbq_clean_idx = clean_idx;
1216                                        return;
1217                                }
1218                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1219                                map = pci_map_single(qdev->pdev,
1220                                                     sbq_desc->p.skb->data,
1221                                                     rx_ring->sbq_buf_size,
1222                                                     PCI_DMA_FROMDEVICE);
1223                                if (pci_dma_mapping_error(qdev->pdev, map)) {
1224                                        netif_err(qdev, ifup, qdev->ndev,
1225                                                  "PCI mapping failed.\n");
1226                                        rx_ring->sbq_clean_idx = clean_idx;
1227                                        dev_kfree_skb_any(sbq_desc->p.skb);
1228                                        sbq_desc->p.skb = NULL;
1229                                        return;
1230                                }
1231                                dma_unmap_addr_set(sbq_desc, mapaddr, map);
1232                                dma_unmap_len_set(sbq_desc, maplen,
1233                                                  rx_ring->sbq_buf_size);
1234                                *sbq_desc->addr = cpu_to_le64(map);
1235                        }
1236
1237                        clean_idx++;
1238                        if (clean_idx == rx_ring->sbq_len)
1239                                clean_idx = 0;
1240                }
1241                rx_ring->sbq_clean_idx = clean_idx;
1242                rx_ring->sbq_prod_idx += 16;
1243                if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1244                        rx_ring->sbq_prod_idx = 0;
1245                rx_ring->sbq_free_cnt -= 16;
1246        }
1247
1248        if (start_idx != clean_idx) {
1249                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1250                             "sbq: updating prod idx = %d.\n",
1251                             rx_ring->sbq_prod_idx);
1252                ql_write_db_reg(rx_ring->sbq_prod_idx,
1253                                rx_ring->sbq_prod_idx_db_reg);
1254        }
1255}
1256
1257static void ql_update_buffer_queues(struct ql_adapter *qdev,
1258                                    struct rx_ring *rx_ring)
1259{
1260        ql_update_sbq(qdev, rx_ring);
1261        ql_update_lbq(qdev, rx_ring);
1262}
1263
1264/* Unmaps tx buffers.  Can be called from send() if a pci mapping
1265 * fails at some stage, or from the interrupt when a tx completes.
1266 */
1267static void ql_unmap_send(struct ql_adapter *qdev,
1268                          struct tx_ring_desc *tx_ring_desc, int mapped)
1269{
1270        int i;
1271        for (i = 0; i < mapped; i++) {
1272                if (i == 0 || (i == 7 && mapped > 7)) {
1273                        /*
1274                         * Unmap the skb->data area, or the
1275                         * external sglist (AKA the Outbound
1276                         * Address List (OAL)).
1277                         * If it's the zeroth element, then it's
1278                         * the skb->data area.  If it's the 7th
1279                         * element and there are more than 6 frags,
1280                         * then it's an OAL.
1281                         */
1282                        if (i == 7) {
1283                                netif_printk(qdev, tx_done, KERN_DEBUG,
1284                                             qdev->ndev,
1285                                             "unmapping OAL area.\n");
1286                        }
1287                        pci_unmap_single(qdev->pdev,
1288                                         dma_unmap_addr(&tx_ring_desc->map[i],
1289                                                        mapaddr),
1290                                         dma_unmap_len(&tx_ring_desc->map[i],
1291                                                       maplen),
1292                                         PCI_DMA_TODEVICE);
1293                } else {
1294                        netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1295                                     "unmapping frag %d.\n", i);
1296                        pci_unmap_page(qdev->pdev,
1297                                       dma_unmap_addr(&tx_ring_desc->map[i],
1298                                                      mapaddr),
1299                                       dma_unmap_len(&tx_ring_desc->map[i],
1300                                                     maplen), PCI_DMA_TODEVICE);
1301                }
1302        }
1303
1304}
1305
1306/* Map the buffers for this transmit.  This will return
1307 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1308 */
1309static int ql_map_send(struct ql_adapter *qdev,
1310                       struct ob_mac_iocb_req *mac_iocb_ptr,
1311                       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1312{
1313        int len = skb_headlen(skb);
1314        dma_addr_t map;
1315        int frag_idx, err, map_idx = 0;
1316        struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1317        int frag_cnt = skb_shinfo(skb)->nr_frags;
1318
1319        if (frag_cnt) {
1320                netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1321                             "frag_cnt = %d.\n", frag_cnt);
1322        }
1323        /*
1324         * Map the skb buffer first.
1325         */
1326        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1327
1328        err = pci_dma_mapping_error(qdev->pdev, map);
1329        if (err) {
1330                netif_err(qdev, tx_queued, qdev->ndev,
1331                          "PCI mapping failed with error: %d\n", err);
1332
1333                return NETDEV_TX_BUSY;
1334        }
1335
1336        tbd->len = cpu_to_le32(len);
1337        tbd->addr = cpu_to_le64(map);
1338        dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1339        dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1340        map_idx++;
1341
1342        /*
1343         * This loop fills the remainder of the 8 address descriptors
1344         * in the IOCB.  If there are more than 7 fragments, then the
1345         * eighth address desc will point to an external list (OAL).
1346         * When this happens, the remainder of the frags will be stored
1347         * in this list.
1348         */
1349        for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1350                skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1351                tbd++;
1352                if (frag_idx == 6 && frag_cnt > 7) {
1353                        /* Let's tack on an sglist.
1354                         * Our control block will now
1355                         * look like this:
1356                         * iocb->seg[0] = skb->data
1357                         * iocb->seg[1] = frag[0]
1358                         * iocb->seg[2] = frag[1]
1359                         * iocb->seg[3] = frag[2]
1360                         * iocb->seg[4] = frag[3]
1361                         * iocb->seg[5] = frag[4]
1362                         * iocb->seg[6] = frag[5]
1363                         * iocb->seg[7] = ptr to OAL (external sglist)
1364                         * oal->seg[0] = frag[6]
1365                         * oal->seg[1] = frag[7]
1366                         * oal->seg[2] = frag[8]
1367                         * oal->seg[3] = frag[9]
1368                         * oal->seg[4] = frag[10]
1369                         *      etc...
1370                         */
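                            /* Worked example (illustrative only): with
                             * frag_cnt == 10 we reach here at frag_idx == 6,
                             * so iocb segs 0-6 already hold skb->data plus
                             * frags 0-5, seg 7 gets the OAL pointer, frags
                             * 6-9 land in oal->seg[0..3], and tbd->len below
                             * covers 4 descriptors (frag_cnt - frag_idx),
                             * with TX_DESC_C presumably flagging the
                             * continuation list.
                             */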
1371                        /* Tack on the OAL in the eighth segment of IOCB. */
1372                        map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1373                                             sizeof(struct oal),
1374                                             PCI_DMA_TODEVICE);
1375                        err = pci_dma_mapping_error(qdev->pdev, map);
1376                        if (err) {
1377                                netif_err(qdev, tx_queued, qdev->ndev,
1378                                          "PCI mapping outbound address list with error: %d\n",
1379                                          err);
1380                                goto map_error;
1381                        }
1382
1383                        tbd->addr = cpu_to_le64(map);
1384                        /*
1385                         * The length is the number of fragments
1386                         * that remain to be mapped times the length
1387                         * of our sglist (OAL).
1388                         */
1389                        tbd->len =
1390                            cpu_to_le32((sizeof(struct tx_buf_desc) *
1391                                         (frag_cnt - frag_idx)) | TX_DESC_C);
1392                        dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1393                                           map);
1394                        dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1395                                          sizeof(struct oal));
1396                        tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1397                        map_idx++;
1398                }
1399
1400                map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1401                                       DMA_TO_DEVICE);
1402
1403                err = dma_mapping_error(&qdev->pdev->dev, map);
1404                if (err) {
1405                        netif_err(qdev, tx_queued, qdev->ndev,
1406                                  "PCI mapping frags failed with error: %d.\n",
1407                                  err);
1408                        goto map_error;
1409                }
1410
1411                tbd->addr = cpu_to_le64(map);
1412                tbd->len = cpu_to_le32(skb_frag_size(frag));
1413                dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1414                dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1415                                  skb_frag_size(frag));
1416
1417        }
1418        /* Save the number of segments we've mapped. */
1419        tx_ring_desc->map_cnt = map_idx;
1420        /* Terminate the last segment. */
1421        tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1422        return NETDEV_TX_OK;
1423
1424map_error:
1425        /*
1426         * map_idx counts the skb->data mapping plus any frag (and
1427         * OAL) mappings completed before the failure, so
1428         * ql_unmap_send() can release exactly those mappings
1429         * and nothing more.
1430         */
1431        ql_unmap_send(qdev, tx_ring_desc, map_idx);
1432        return NETDEV_TX_BUSY;
1433}
1434
1435/* Categorizing receive firmware frame errors */
1436static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1437                                 struct rx_ring *rx_ring)
1438{
1439        struct nic_stats *stats = &qdev->nic_stats;
1440
1441        stats->rx_err_count++;
1442        rx_ring->rx_errors++;
1443
1444        switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1445        case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1446                stats->rx_code_err++;
1447                break;
1448        case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1449                stats->rx_oversize_err++;
1450                break;
1451        case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1452                stats->rx_undersize_err++;
1453                break;
1454        case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1455                stats->rx_preamble_err++;
1456                break;
1457        case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1458                stats->rx_frame_len_err++;
1459                break;
1460        case IB_MAC_IOCB_RSP_ERR_CRC:
1461                stats->rx_crc_err++;
                    break;
1462        default:
1463                break;
1464        }
1465}
1466
1467/**
1468 * ql_update_mac_hdr_len - helper routine to update the mac header length
1469 * based on vlan tags if present
1470 */
1471static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1472                                  struct ib_mac_iocb_rsp *ib_mac_rsp,
1473                                  void *page, size_t *len)
1474{
1475        u16 *tags;
1476
1477        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1478                return;
1479        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1480                tags = (u16 *)page;
1481                /* Look for stacked vlan tags in ethertype field */
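                    /* tags points at the start of the Ethernet header, so
                     * tags[6] is the 16-bit word at byte offset 12 (the outer
                     * ethertype/TPID) and tags[8] is the word at offset 16,
                     * which is the inner TPID when a second tag follows; both
                     * are read straight from the frame, i.e. in network byte
                     * order.
                     */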
1482                if (tags[6] == htons(ETH_P_8021Q) &&
1483                    tags[8] == htons(ETH_P_8021Q))
1484                        *len += 2 * VLAN_HLEN;
1485                else
1486                        *len += VLAN_HLEN;
1487        }
1488}
1489
1490/* Process an inbound completion from an rx ring. */
1491static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1492                                        struct rx_ring *rx_ring,
1493                                        struct ib_mac_iocb_rsp *ib_mac_rsp,
1494                                        u32 length,
1495                                        u16 vlan_id)
1496{
1497        struct sk_buff *skb;
1498        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1499        struct napi_struct *napi = &rx_ring->napi;
1500
1501        /* Frame error, so drop the packet. */
1502        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1503                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1504                put_page(lbq_desc->p.pg_chunk.page);
1505                return;
1506        }
1507        napi->dev = qdev->ndev;
1508
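            /* Build the frame directly into the skb that napi_get_frags()
             * maintains for this NAPI context: the page chunk is attached as
             * a frag below and handed to GRO via napi_gro_frags(), so the
             * payload is never copied.
             */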
1509        skb = napi_get_frags(napi);
1510        if (!skb) {
1511                netif_err(qdev, drv, qdev->ndev,
1512                          "Couldn't get an skb, exiting.\n");
1513                rx_ring->rx_dropped++;
1514                put_page(lbq_desc->p.pg_chunk.page);
1515                return;
1516        }
1517        prefetch(lbq_desc->p.pg_chunk.va);
1518        __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1519                             lbq_desc->p.pg_chunk.page,
1520                             lbq_desc->p.pg_chunk.offset,
1521                             length);
1522
1523        skb->len += length;
1524        skb->data_len += length;
1525        skb->truesize += length;
1526        skb_shinfo(skb)->nr_frags++;
1527
1528        rx_ring->rx_packets++;
1529        rx_ring->rx_bytes += length;
1530        skb->ip_summed = CHECKSUM_UNNECESSARY;
1531        skb_record_rx_queue(skb, rx_ring->cq_id);
1532        if (vlan_id != 0xffff)
1533                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1534        napi_gro_frags(napi);
1535}
1536
1537/* Process an inbound completion from an rx ring. */
1538static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1539                                        struct rx_ring *rx_ring,
1540                                        struct ib_mac_iocb_rsp *ib_mac_rsp,
1541                                        u32 length,
1542                                        u16 vlan_id)
1543{
1544        struct net_device *ndev = qdev->ndev;
1545        struct sk_buff *skb = NULL;
1546        void *addr;
1547        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1548        struct napi_struct *napi = &rx_ring->napi;
1549        size_t hlen = ETH_HLEN;
1550
1551        skb = netdev_alloc_skb(ndev, length);
1552        if (!skb) {
1553                rx_ring->rx_dropped++;
1554                put_page(lbq_desc->p.pg_chunk.page);
1555                return;
1556        }
1557
1558        addr = lbq_desc->p.pg_chunk.va;
1559        prefetch(addr);
1560
1561        /* Frame error, so drop the packet. */
1562        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1563                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1564                goto err_out;
1565        }
1566
1567        /* Update the MAC header length */
1568        ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1569
1570        /* The max framesize filter on this chip is set higher than
1571         * MTU since FCoE uses 2k frames.
1572         */
1573        if (length > ndev->mtu + hlen) {
1574                netif_err(qdev, drv, qdev->ndev,
1575                          "Frame too long, dropping.\n");
1576                rx_ring->rx_dropped++;
1577                goto err_out;
1578        }
1579        skb_put_data(skb, addr, hlen);
1580        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1581                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1582                     length);
1583        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1584                                lbq_desc->p.pg_chunk.offset + hlen,
1585                                length - hlen);
1586        skb->len += length - hlen;
1587        skb->data_len += length - hlen;
1588        skb->truesize += length - hlen;
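            /* At this point the first hlen bytes (Ethernet header plus any
             * VLAN tags) sit in the skb's linear area and the rest of the
             * payload is referenced in place as a page fragment.
             */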
1589
1590        rx_ring->rx_packets++;
1591        rx_ring->rx_bytes += skb->len;
1592        skb->protocol = eth_type_trans(skb, ndev);
1593        skb_checksum_none_assert(skb);
1594
1595        if ((ndev->features & NETIF_F_RXCSUM) &&
1596                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1597                /* TCP frame. */
1598                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1599                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1600                                     "TCP checksum done!\n");
1601                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1602                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1603                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1604                        /* Unfragmented ipv4 UDP frame. */
1605                        struct iphdr *iph =
1606                                (struct iphdr *)((u8 *)addr + hlen);
1607                        if (!(iph->frag_off &
1608                                htons(IP_MF|IP_OFFSET))) {
1609                                skb->ip_summed = CHECKSUM_UNNECESSARY;
1610                                netif_printk(qdev, rx_status, KERN_DEBUG,
1611                                             qdev->ndev,
1612                                             "UDP checksum done!\n");
1613                        }
1614                }
1615        }
1616
1617        skb_record_rx_queue(skb, rx_ring->cq_id);
1618        if (vlan_id != 0xffff)
1619                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1620        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1621                napi_gro_receive(napi, skb);
1622        else
1623                netif_receive_skb(skb);
1624        return;
1625err_out:
1626        dev_kfree_skb_any(skb);
1627        put_page(lbq_desc->p.pg_chunk.page);
1628}
1629
1630/* Process an inbound completion from an rx ring. */
1631static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1632                                        struct rx_ring *rx_ring,
1633                                        struct ib_mac_iocb_rsp *ib_mac_rsp,
1634                                        u32 length,
1635                                        u16 vlan_id)
1636{
1637        struct net_device *ndev = qdev->ndev;
1638        struct sk_buff *skb = NULL;
1639        struct sk_buff *new_skb = NULL;
1640        struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1641
1642        skb = sbq_desc->p.skb;
1643        /* Allocate new_skb and copy */
1644        new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1645        if (new_skb == NULL) {
1646                rx_ring->rx_dropped++;
1647                return;
1648        }
1649        skb_reserve(new_skb, NET_IP_ALIGN);
1650
1651        pci_dma_sync_single_for_cpu(qdev->pdev,
1652                                    dma_unmap_addr(sbq_desc, mapaddr),
1653                                    dma_unmap_len(sbq_desc, maplen),
1654                                    PCI_DMA_FROMDEVICE);
1655
1656        skb_put_data(new_skb, skb->data, length);
1657
1658        pci_dma_sync_single_for_device(qdev->pdev,
1659                                       dma_unmap_addr(sbq_desc, mapaddr),
1660                                       dma_unmap_len(sbq_desc, maplen),
1661                                       PCI_DMA_FROMDEVICE);
1662        skb = new_skb;
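            /* Only the received bytes were copied into new_skb above; the
             * small-buffer skb stays mapped and attached to sbq_desc so the
             * hardware can reuse it.
             */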
1663
1664        /* Frame error, so drop the packet. */
1665        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1666                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1667                dev_kfree_skb_any(skb);
1668                return;
1669        }
1670
1671        /* loopback self test for ethtool */
1672        if (test_bit(QL_SELFTEST, &qdev->flags)) {
1673                ql_check_lb_frame(qdev, skb);
1674                dev_kfree_skb_any(skb);
1675                return;
1676        }
1677
1678        /* The max framesize filter on this chip is set higher than
1679         * MTU since FCoE uses 2k frames.
1680         */
1681        if (skb->len > ndev->mtu + ETH_HLEN) {
1682                dev_kfree_skb_any(skb);
1683                rx_ring->rx_dropped++;
1684                return;
1685        }
1686
1687        prefetch(skb->data);
1688        if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1689                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690                             "%s Multicast.\n",
1691                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1692                             IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1693                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1694                             IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1695                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1696                             IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1697        }
1698        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1699                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1700                             "Promiscuous Packet.\n");
1701
1702        rx_ring->rx_packets++;
1703        rx_ring->rx_bytes += skb->len;
1704        skb->protocol = eth_type_trans(skb, ndev);
1705        skb_checksum_none_assert(skb);
1706
1707        /* If rx checksum is on, and there are no
1708         * csum or frame errors.
1709         */
1710        if ((ndev->features & NETIF_F_RXCSUM) &&
1711                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1712                /* TCP frame. */
1713                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1714                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1715                                     "TCP checksum done!\n");
1716                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1717                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1718                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1719                        /* Unfragmented ipv4 UDP frame. */
1720                        struct iphdr *iph = (struct iphdr *) skb->data;
1721                        if (!(iph->frag_off &
1722                                htons(IP_MF|IP_OFFSET))) {
1723                                skb->ip_summed = CHECKSUM_UNNECESSARY;
1724                                netif_printk(qdev, rx_status, KERN_DEBUG,
1725                                             qdev->ndev,
1726                                             "UDP checksum done!\n");
1727                        }
1728                }
1729        }
1730
1731        skb_record_rx_queue(skb, rx_ring->cq_id);
1732        if (vlan_id != 0xffff)
1733                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1734        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1735                napi_gro_receive(&rx_ring->napi, skb);
1736        else
1737                netif_receive_skb(skb);
1738}
1739
1740static void ql_realign_skb(struct sk_buff *skb, int len)
1741{
1742        void *temp_addr = skb->data;
1743
1744        /* Undo the skb_reserve(skb,32) we did before
1745         * giving to hardware, and realign data on
1746         * a 2-byte boundary.
1747         */
1748        skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1749        skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1750        memmove(skb->data, temp_addr, len);
1751}
1752
1753/*
1754 * This function builds an skb for the given inbound
1755 * completion.  It will be rewritten for readability in the near
1756 * future, but for now it works well.
1757 */
1758static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1759                                       struct rx_ring *rx_ring,
1760                                       struct ib_mac_iocb_rsp *ib_mac_rsp)
1761{
1762        struct bq_desc *lbq_desc;
1763        struct bq_desc *sbq_desc;
1764        struct sk_buff *skb = NULL;
1765        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1766        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1767        size_t hlen = ETH_HLEN;
1768
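            /* The completion flags say where the hardware put this frame:
             * HV/HS indicate the headers were split into a small buffer,
             * DS that the data is in a small buffer, DL that it is in one
             * large buffer, and otherwise the data spans a chain of large
             * buffers.  Each branch below rebuilds an skb accordingly.
             */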
1769        /*
1770         * Handle the header buffer if present.
1771         */
1772        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1773            ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1774                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775                             "Header of %d bytes in small buffer.\n", hdr_len);
1776                /*
1777                 * Headers fit nicely into a small buffer.
1778                 */
1779                sbq_desc = ql_get_curr_sbuf(rx_ring);
1780                pci_unmap_single(qdev->pdev,
1781                                dma_unmap_addr(sbq_desc, mapaddr),
1782                                dma_unmap_len(sbq_desc, maplen),
1783                                PCI_DMA_FROMDEVICE);
1784                skb = sbq_desc->p.skb;
1785                ql_realign_skb(skb, hdr_len);
1786                skb_put(skb, hdr_len);
1787                sbq_desc->p.skb = NULL;
1788        }
1789
1790        /*
1791         * Handle the data buffer(s).
1792         */
1793        if (unlikely(!length)) {        /* Is there data too? */
1794                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1795                             "No Data buffer in this packet.\n");
1796                return skb;
1797        }
1798
1799        if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1800                if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1801                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1802                                     "Headers in small, data of %d bytes in small, combine them.\n",
1803                                     length);
1804                        /*
1805                         * Data is less than small buffer size so it's
1806                         * stuffed in a small buffer.
1807                         * For this case we append the data
1808                         * from the "data" small buffer to the "header" small
1809                         * buffer.
1810                         */
1811                        sbq_desc = ql_get_curr_sbuf(rx_ring);
1812                        pci_dma_sync_single_for_cpu(qdev->pdev,
1813                                                    dma_unmap_addr
1814                                                    (sbq_desc, mapaddr),
1815                                                    dma_unmap_len
1816                                                    (sbq_desc, maplen),
1817                                                    PCI_DMA_FROMDEVICE);
1818                        skb_put_data(skb, sbq_desc->p.skb->data, length);
1819                        pci_dma_sync_single_for_device(qdev->pdev,
1820                                                       dma_unmap_addr
1821                                                       (sbq_desc,
1822                                                        mapaddr),
1823                                                       dma_unmap_len
1824                                                       (sbq_desc,
1825                                                        maplen),
1826                                                       PCI_DMA_FROMDEVICE);
1827                } else {
1828                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1829                                     "%d bytes in a single small buffer.\n",
1830                                     length);
1831                        sbq_desc = ql_get_curr_sbuf(rx_ring);
1832                        skb = sbq_desc->p.skb;
1833                        ql_realign_skb(skb, length);
1834                        skb_put(skb, length);
1835                        pci_unmap_single(qdev->pdev,
1836                                         dma_unmap_addr(sbq_desc,
1837                                                        mapaddr),
1838                                         dma_unmap_len(sbq_desc,
1839                                                       maplen),
1840                                         PCI_DMA_FROMDEVICE);
1841                        sbq_desc->p.skb = NULL;
1842                }
1843        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1844                if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1845                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1846                                     "Header in small, %d bytes in large. Chain large to small!\n",
1847                                     length);
1848                        /*
1849                         * The data is in a single large buffer.  We
1850                         * chain it to the header buffer's skb and let
1851                         * it rip.
1852                         */
1853                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1854                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1855                                     "Chaining page at offset = %d, for %d bytes  to skb.\n",
1856                                     lbq_desc->p.pg_chunk.offset, length);
1857                        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1858                                                lbq_desc->p.pg_chunk.offset,
1859                                                length);
1860                        skb->len += length;
1861                        skb->data_len += length;
1862                        skb->truesize += length;
1863                } else {
1864                        /*
1865                         * The headers and data are in a single large buffer. We
1866                         * copy it to a new skb and let it go. This can happen with
1867                         * jumbo mtu on a non-TCP/UDP frame.
1868                         */
1869                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1870                        skb = netdev_alloc_skb(qdev->ndev, length);
1871                        if (skb == NULL) {
1872                                netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1873                                             "No skb available, drop the packet.\n");
1874                                return NULL;
1875                        }
1876                        pci_unmap_page(qdev->pdev,
1877                                       dma_unmap_addr(lbq_desc,
1878                                                      mapaddr),
1879                                       dma_unmap_len(lbq_desc, maplen),
1880                                       PCI_DMA_FROMDEVICE);
1881                        skb_reserve(skb, NET_IP_ALIGN);
1882                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1883                                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1884                                     length);
1885                        skb_fill_page_desc(skb, 0,
1886                                                lbq_desc->p.pg_chunk.page,
1887                                                lbq_desc->p.pg_chunk.offset,
1888                                                length);
1889                        skb->len += length;
1890                        skb->data_len += length;
1891                        skb->truesize += length;
1892                        ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1893                                              lbq_desc->p.pg_chunk.va,
1894                                              &hlen);
1895                        __pskb_pull_tail(skb, hlen);
1896                }
1897        } else {
1898                /*
1899                 * The data is in a chain of large buffers
1900                 * pointed to by a small buffer.  We loop
1901                 * through and chain them to our small header
1902                 * buffer's skb.
1903                 * frags:  There are 18 max frags and our small
1904                 *         buffer will hold 32 of them. The thing is,
1905                 *         we'll use 3 max for our 9000 byte jumbo
1906                 *         frames.  If the MTU goes up we could
1907                 *         eventually be in trouble.
1908                 */
1909                int size, i = 0;
1910                sbq_desc = ql_get_curr_sbuf(rx_ring);
1911                pci_unmap_single(qdev->pdev,
1912                                 dma_unmap_addr(sbq_desc, mapaddr),
1913                                 dma_unmap_len(sbq_desc, maplen),
1914                                 PCI_DMA_FROMDEVICE);
1915                if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1916                        /*
1917                         * This is an non TCP/UDP IP frame, so
1918                         * the headers aren't split into a small
1919                         * buffer.  We have to use the small buffer
1920                         * that contains our sg list as our skb to
1921                         * send upstairs. Copy the sg list here to
1922                         * a local buffer and use it to find the
1923                         * pages to chain.
1924                         */
1925                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1926                                     "%d bytes of headers & data in chain of large.\n",
1927                                     length);
1928                        skb = sbq_desc->p.skb;
1929                        sbq_desc->p.skb = NULL;
1930                        skb_reserve(skb, NET_IP_ALIGN);
1931                }
1932                do {
1933                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1934                        size = (length < rx_ring->lbq_buf_size) ? length :
1935                                rx_ring->lbq_buf_size;
1936
1937                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938                                     "Adding page %d to skb for %d bytes.\n",
1939                                     i, size);
1940                        skb_fill_page_desc(skb, i,
1941                                                lbq_desc->p.pg_chunk.page,
1942                                                lbq_desc->p.pg_chunk.offset,
1943                                                size);
1944                        skb->len += size;
1945                        skb->data_len += size;
1946                        skb->truesize += size;
1947                        length -= size;
1948                        i++;
1949                } while (length > 0);
1950                ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1951                                      &hlen);
1952                __pskb_pull_tail(skb, hlen);
1953        }
1954        return skb;
1955}
1956
1957/* Process an inbound completion from an rx ring. */
1958static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1959                                   struct rx_ring *rx_ring,
1960                                   struct ib_mac_iocb_rsp *ib_mac_rsp,
1961                                   u16 vlan_id)
1962{
1963        struct net_device *ndev = qdev->ndev;
1964        struct sk_buff *skb = NULL;
1965
1966        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1967
1968        skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1969        if (unlikely(!skb)) {
1970                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1971                             "No skb available, drop packet.\n");
1972                rx_ring->rx_dropped++;
1973                return;
1974        }
1975
1976        /* Frame error, so drop the packet. */
1977        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1978                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1979                dev_kfree_skb_any(skb);
1980                return;
1981        }
1982
1983        /* The max framesize filter on this chip is set higher than
1984         * MTU since FCoE uses 2k frames.
1985         */
1986        if (skb->len > ndev->mtu + ETH_HLEN) {
1987                dev_kfree_skb_any(skb);
1988                rx_ring->rx_dropped++;
1989                return;
1990        }
1991
1992        /* loopback self test for ethtool */
1993        if (test_bit(QL_SELFTEST, &qdev->flags)) {
1994                ql_check_lb_frame(qdev, skb);
1995                dev_kfree_skb_any(skb);
1996                return;
1997        }
1998
1999        prefetch(skb->data);
2000        if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
2001                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
2002                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2003                             IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
2004                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2005                             IB_MAC_IOCB_RSP_M_REG ? "Registered" :
2006                             (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2007                             IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2008                rx_ring->rx_multicast++;
2009        }
2010        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2011                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012                             "Promiscuous Packet.\n");
2013        }
2014
2015        skb->protocol = eth_type_trans(skb, ndev);
2016        skb_checksum_none_assert(skb);
2017
2018        /* If rx checksum is on, and there are no
2019         * csum or frame errors.
2020         */
2021        if ((ndev->features & NETIF_F_RXCSUM) &&
2022                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2023                /* TCP frame. */
2024                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2025                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2026                                     "TCP checksum done!\n");
2027                        skb->ip_summed = CHECKSUM_UNNECESSARY;
2028                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2029                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2030                        /* Unfragmented ipv4 UDP frame. */
2031                        struct iphdr *iph = (struct iphdr *) skb->data;
2032                        if (!(iph->frag_off &
2033                                htons(IP_MF|IP_OFFSET))) {
2034                                skb->ip_summed = CHECKSUM_UNNECESSARY;
2035                                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2036                                             "UDP checksum done!\n");
2037                        }
2038                }
2039        }
2040
2041        rx_ring->rx_packets++;
2042        rx_ring->rx_bytes += skb->len;
2043        skb_record_rx_queue(skb, rx_ring->cq_id);
2044        if (vlan_id != 0xffff)
2045                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2046        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2047                napi_gro_receive(&rx_ring->napi, skb);
2048        else
2049                netif_receive_skb(skb);
2050}
2051
2052/* Process an inbound completion from an rx ring. */
2053static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2054                                        struct rx_ring *rx_ring,
2055                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
2056{
2057        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2058        u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2059                        (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2060                        ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2061                        IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
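            /* vlan_id of 0xffff means "no tag to give to the stack"; every
             * rx path below checks for it before __vlan_hwaccel_put_tag().
             */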
2062
2063        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2064
2065        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2066                /* The data and headers are split into
2067                 * separate buffers.
2068                 */
2069                ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2070                                                vlan_id);
2071        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2072                /* The data fit in a single small buffer.
2073                 * Allocate a new skb, copy the data and
2074                 * return the buffer to the free pool.
2075                 */
2076                ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2077                                                length, vlan_id);
2078        } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2079                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2080                (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2081                /* TCP packet in a page chunk that's been checksummed.
2082                 * Tack it on to our GRO skb and let it go.
2083                 */
2084                ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2085                                                length, vlan_id);
2086        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2087                /* Non-TCP packet in a page chunk. Allocate an
2088                 * skb, tack it on frags, and send it up.
2089                 */
2090                ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2091                                                length, vlan_id);
2092        } else {
2093                /* Non-TCP/UDP large frames that span multiple buffers
2094                 * can be processed correctly by the split frame logic.
2095                 */
2096                ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2097                                                vlan_id);
2098        }
2099
2100        return (unsigned long)length;
2101}
2102
2103/* Process an outbound completion from an rx ring. */
2104static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2105                                   struct ob_mac_iocb_rsp *mac_rsp)
2106{
2107        struct tx_ring *tx_ring;
2108        struct tx_ring_desc *tx_ring_desc;
2109
2110        QL_DUMP_OB_MAC_RSP(mac_rsp);
2111        tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2112        tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2113        ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2114        tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2115        tx_ring->tx_packets++;
2116        dev_kfree_skb(tx_ring_desc->skb);
2117        tx_ring_desc->skb = NULL;
2118
2119        if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2120                                        OB_MAC_IOCB_RSP_S |
2121                                        OB_MAC_IOCB_RSP_L |
2122                                        OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2123                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2124                        netif_warn(qdev, tx_done, qdev->ndev,
2125                                   "Total descriptor length did not match transfer length.\n");
2126                }
2127                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2128                        netif_warn(qdev, tx_done, qdev->ndev,
2129                                   "Frame too short to be valid, not sent.\n");
2130                }
2131                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2132                        netif_warn(qdev, tx_done, qdev->ndev,
2133                                   "Frame too long, but sent anyway.\n");
2134                }
2135                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2136                        netif_warn(qdev, tx_done, qdev->ndev,
2137                                   "PCI backplane error. Frame not sent.\n");
2138                }
2139        }
2140        atomic_inc(&tx_ring->tx_count);
2141}
2142
2143/* Fire up a handler to reset the MPI processor. */
2144void ql_queue_fw_error(struct ql_adapter *qdev)
2145{
2146        ql_link_off(qdev);
2147        queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2148}
2149
2150void ql_queue_asic_error(struct ql_adapter *qdev)
2151{
2152        ql_link_off(qdev);
2153        ql_disable_interrupts(qdev);
2154        /* Clear adapter up bit to signal the recovery
2155         * process that it shouldn't kill the reset worker
2156         * thread
2157         */
2158        clear_bit(QL_ADAPTER_UP, &qdev->flags);
2159        /* Set asic recovery bit to indicate reset process that we are
2160         * in fatal error recovery process rather than normal close
2161         */
2162        set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2163        queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2164}
2165
2166static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2167                                    struct ib_ae_iocb_rsp *ib_ae_rsp)
2168{
2169        switch (ib_ae_rsp->event) {
2170        case MGMT_ERR_EVENT:
2171                netif_err(qdev, rx_err, qdev->ndev,
2172                          "Management Processor Fatal Error.\n");
2173                ql_queue_fw_error(qdev);
2174                return;
2175
2176        case CAM_LOOKUP_ERR_EVENT:
2177                netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2178                netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2179                ql_queue_asic_error(qdev);
2180                return;
2181
2182        case SOFT_ECC_ERROR_EVENT:
2183                netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2184                ql_queue_asic_error(qdev);
2185                break;
2186
2187        case PCI_ERR_ANON_BUF_RD:
2188                netdev_err(qdev->ndev, "PCI error occurred when reading "
2189                                        "anonymous buffers from rx_ring %d.\n",
2190                                        ib_ae_rsp->q_id);
2191                ql_queue_asic_error(qdev);
2192                break;
2193
2194        default:
2195                netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2196                          ib_ae_rsp->event);
2197                ql_queue_asic_error(qdev);
2198                break;
2199        }
2200}
2201
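    /* "Outbound" completions (TX-done IOCBs) arrive on completion queues
     * that the driver still represents as rx_ring structures; this routine
     * drains one such ring, frees the completed sends and, if the matching
     * TX queue was stopped, wakes it once enough descriptors are free.
     */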
2202static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2203{
2204        struct ql_adapter *qdev = rx_ring->qdev;
2205        u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2206        struct ob_mac_iocb_rsp *net_rsp = NULL;
2207        int count = 0;
2208
2209        struct tx_ring *tx_ring;
2210        /* While there are entries in the completion queue. */
2211        while (prod != rx_ring->cnsmr_idx) {
2212
2213                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2214                             "cq_id = %d, prod = %d, cnsmr = %d\n",
2215                             rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2216
2217                net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2218                rmb();
2219                switch (net_rsp->opcode) {
2220
2221                case OPCODE_OB_MAC_TSO_IOCB:
2222                case OPCODE_OB_MAC_IOCB:
2223                        ql_process_mac_tx_intr(qdev, net_rsp);
2224                        break;
2225                default:
2226                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2227                                     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2228                                     net_rsp->opcode);
2229                }
2230                count++;
2231                ql_update_cq(rx_ring);
2232                prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2233        }
2234        if (!net_rsp)
2235                return 0;
2236        ql_write_cq_idx(rx_ring);
2237        tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2238        if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2239                if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2240                        /*
2241                         * The queue got stopped because the tx_ring was full.
2242                         * Wake it up, because it's now at least 25% empty.
2243                         */
2244                        netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2245        }
2246
2247        return count;
2248}
2249
2250static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2251{
2252        struct ql_adapter *qdev = rx_ring->qdev;
2253        u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2254        struct ql_net_rsp_iocb *net_rsp;
2255        int count = 0;
2256
2257        /* While there are entries in the completion queue. */
2258        while (prod != rx_ring->cnsmr_idx) {
2259
2260                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2261                             "cq_id = %d, prod = %d, cnsmr = %d\n",
2262                             rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2263
2264                net_rsp = rx_ring->curr_entry;
2265                rmb();
2266                switch (net_rsp->opcode) {
2267                case OPCODE_IB_MAC_IOCB:
2268                        ql_process_mac_rx_intr(qdev, rx_ring,
2269                                               (struct ib_mac_iocb_rsp *)
2270                                               net_rsp);
2271                        break;
2272
2273                case OPCODE_IB_AE_IOCB:
2274                        ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2275                                                net_rsp);
2276                        break;
2277                default:
2278                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2279                                     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2280                                     net_rsp->opcode);
2281                        break;
2282                }
2283                count++;
2284                ql_update_cq(rx_ring);
2285                prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2286                if (count == budget)
2287                        break;
2288        }
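            /* Replenish the small/large buffer queues for what was consumed
             * above, then publish the new consumer index to the hardware.
             */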
2289        ql_update_buffer_queues(qdev, rx_ring);
2290        ql_write_cq_idx(rx_ring);
2291        return count;
2292}
2293
2294static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2295{
2296        struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2297        struct ql_adapter *qdev = rx_ring->qdev;
2298        struct rx_ring *trx_ring;
2299        int i, work_done = 0;
2300        struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2301
2302        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2303                     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2304
2305        /* Service the TX rings first.  They start
2306         * right after the RSS rings. */
2307        for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2308                trx_ring = &qdev->rx_ring[i];
2309                /* If this TX completion ring belongs to this vector and
2310                 * it's not empty then service it.
2311                 */
2312                if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2313                        (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2314                                        trx_ring->cnsmr_idx)) {
2315                        netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2316                                     "%s: Servicing TX completion ring %d.\n",
2317                                     __func__, trx_ring->cq_id);
2318                        ql_clean_outbound_rx_ring(trx_ring);
2319                }
2320        }
2321
2322        /*
2323         * Now service the RSS ring if it's active.
2324         */
2325        if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2326                                        rx_ring->cnsmr_idx) {
2327                netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2328                             "%s: Servicing RX completion ring %d.\n",
2329                             __func__, rx_ring->cq_id);
2330                work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2331        }
2332
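            /* Standard NAPI contract: only if less than the full budget was
             * used do we complete NAPI and re-enable this vector's
             * completion interrupt.
             */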
2333        if (work_done < budget) {
2334                napi_complete_done(napi, work_done);
2335                ql_enable_completion_interrupt(qdev, rx_ring->irq);
2336        }
2337        return work_done;
2338}
2339
2340static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2341{
2342        struct ql_adapter *qdev = netdev_priv(ndev);
2343
2344        if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2345                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2346                                 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2347        } else {
2348                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2349        }
2350}
2351
2352/**
2353 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2354 * based on the features to enable/disable hardware vlan accel
2355 */
2356static int qlge_update_hw_vlan_features(struct net_device *ndev,
2357                                        netdev_features_t features)
2358{
2359        struct ql_adapter *qdev = netdev_priv(ndev);
2360        int status = 0;
2361        bool need_restart = netif_running(ndev);
2362
2363        if (need_restart) {
2364                status = ql_adapter_down(qdev);
2365                if (status) {
2366                        netif_err(qdev, link, qdev->ndev,
2367                                  "Failed to bring down the adapter\n");
2368                        return status;
2369                }
2370        }
2371
2372        /* update the features with the recent change */
2373        ndev->features = features;
2374
2375        if (need_restart) {
2376                status = ql_adapter_up(qdev);
2377                if (status) {
2378                        netif_err(qdev, link, qdev->ndev,
2379                                  "Failed to bring up the adapter\n");
2380                        return status;
2381                }
2382        }
2383
2384        return status;
2385}
2386
2387static int qlge_set_features(struct net_device *ndev,
2388        netdev_features_t features)
2389{
2390        netdev_features_t changed = ndev->features ^ features;
2391        int err;
2392
2393        if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2394                /* Update the behavior of vlan accel in the adapter */
2395                err = qlge_update_hw_vlan_features(ndev, features);
2396                if (err)
2397                        return err;
2398
2399                qlge_vlan_mode(ndev, features);
2400        }
2401
2402        return 0;
2403}
2404
2405static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2406{
2407        u32 enable_bit = MAC_ADDR_E;
2408        int err;
2409
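            /* A VLAN filter entry is a MAC_ADDR_TYPE_VLAN slot keyed by the
             * VID: writing it with MAC_ADDR_E sets the enable bit, while
             * __qlge_vlan_rx_kill_vid() below writes 0 to clear it.
             */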
2410        err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2411                                  MAC_ADDR_TYPE_VLAN, vid);
2412        if (err)
2413                netif_err(qdev, ifup, qdev->ndev,
2414                          "Failed to init vlan address.\n");
2415        return err;
2416}
2417
2418static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2419{
2420        struct ql_adapter *qdev = netdev_priv(ndev);
2421        int status;
2422        int err;
2423
2424        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2425        if (status)
2426                return status;
2427
2428        err = __qlge_vlan_rx_add_vid(qdev, vid);
2429        set_bit(vid, qdev->active_vlans);
2430
2431        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2432
2433        return err;
2434}
2435
2436static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2437{
2438        u32 enable_bit = 0;
2439        int err;
2440
2441        err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2442                                  MAC_ADDR_TYPE_VLAN, vid);
2443        if (err)
2444                netif_err(qdev, ifup, qdev->ndev,
2445                          "Failed to clear vlan address.\n");
2446        return err;
2447}
2448
2449static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2450{
2451        struct ql_adapter *qdev = netdev_priv(ndev);
2452        int status;
2453        int err;
2454
2455        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2456        if (status)
2457                return status;
2458
2459        err = __qlge_vlan_rx_kill_vid(qdev, vid);
2460        clear_bit(vid, qdev->active_vlans);
2461
2462        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2463
2464        return err;
2465}
2466
2467static void qlge_restore_vlan(struct ql_adapter *qdev)
2468{
2469        int status;
2470        u16 vid;
2471
2472        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2473        if (status)
2474                return;
2475
2476        for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2477                __qlge_vlan_rx_add_vid(qdev, vid);
2478
2479        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2480}
2481
2482/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2483static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2484{
2485        struct rx_ring *rx_ring = dev_id;
2486        napi_schedule(&rx_ring->napi);
2487        return IRQ_HANDLED;
2488}
2489
2490/* This handles a fatal error, MPI activity, and the default
2491 * rx_ring in an MSI-X multiple vector environment.
2492 * In MSI/Legacy environments it also processes the rest of
2493 * the rx_rings.
2494 */
2495static irqreturn_t qlge_isr(int irq, void *dev_id)
2496{
2497        struct rx_ring *rx_ring = dev_id;
2498        struct ql_adapter *qdev = rx_ring->qdev;
2499        struct intr_context *intr_context = &qdev->intr_context[0];
2500        u32 var;
2501        int work_done = 0;
2502
2503        spin_lock(&qdev->hw_lock);
2504        if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2505                netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2506                             "Shared Interrupt, Not ours!\n");
2507                spin_unlock(&qdev->hw_lock);
2508                return IRQ_NONE;
2509        }
2510        spin_unlock(&qdev->hw_lock);
2511
2512        var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2513
2514        /*
2515         * Check for fatal error.
2516         */
2517        if (var & STS_FE) {
2518                ql_queue_asic_error(qdev);
2519                netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2520                var = ql_read32(qdev, ERR_STS);
2521                netdev_err(qdev->ndev, "Resetting chip. "
2522                                        "Error Status Register = 0x%x\n", var);
2523                return IRQ_HANDLED;
2524        }
2525
2526        /*
2527         * Check MPI processor activity.
2528         */
2529        if ((var & STS_PI) &&
2530                (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2531                /*
2532                 * We've got an async event or mailbox completion.
2533                 * Handle it and clear the source of the interrupt.
2534                 */
2535                netif_err(qdev, intr, qdev->ndev,
2536                          "Got MPI processor interrupt.\n");
2537                ql_disable_completion_interrupt(qdev, intr_context->intr);
2538                ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2539                queue_delayed_work_on(smp_processor_id(),
2540                                qdev->workqueue, &qdev->mpi_work, 0);
2541                work_done++;
2542        }
2543
2544        /*
2545         * Get the bit-mask that shows the active queues for this
2546         * pass.  Compare it to the queues that this irq services
2547         * and call napi if there's a match.
2548         */
2549        var = ql_read32(qdev, ISR1);
2550        if (var & intr_context->irq_mask) {
2551                netif_info(qdev, intr, qdev->ndev,
2552                           "Waking handler for rx_ring[0].\n");
2553                ql_disable_completion_interrupt(qdev, intr_context->intr);
2554                napi_schedule(&rx_ring->napi);
2555                work_done++;
2556        }
2557        ql_enable_completion_interrupt(qdev, intr_context->intr);
2558        return work_done ? IRQ_HANDLED : IRQ_NONE;
2559}
2560
2561static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2562{
2564        if (skb_is_gso(skb)) {
2565                int err;
2566                __be16 l3_proto = vlan_get_protocol(skb);
2567
2568                err = skb_cow_head(skb, 0);
2569                if (err < 0)
2570                        return err;
2571
2572                mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2573                mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2574                mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2575                mac_iocb_ptr->total_hdrs_len =
2576                    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2577                mac_iocb_ptr->net_trans_offset =
2578                    cpu_to_le16(skb_network_offset(skb) |
2579                                skb_transport_offset(skb)
2580                                << OB_MAC_TRANSPORT_HDR_SHIFT);
2581                mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2582                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
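                    /* Seed the TCP checksum with the pseudo-header sum
                     * (length 0) so the hardware can finish the checksum for
                     * each segment it emits; the same pattern is used by
                     * other LSO-capable drivers.
                     */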
2583                if (likely(l3_proto == htons(ETH_P_IP))) {
2584                        struct iphdr *iph = ip_hdr(skb);
2585                        iph->check = 0;
2586                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2587                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2588                                                                 iph->daddr, 0,
2589                                                                 IPPROTO_TCP,
2590                                                                 0);
2591                } else if (l3_proto == htons(ETH_P_IPV6)) {
2592                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2593                        tcp_hdr(skb)->check =
2594                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2595                                             &ipv6_hdr(skb)->daddr,
2596                                             0, IPPROTO_TCP, 0);
2597                }
2598                return 1;
2599        }
2600        return 0;
2601}
2602
2603static void ql_hw_csum_setup(struct sk_buff *skb,
2604                             struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2605{
2606        int len;
2607        struct iphdr *iph = ip_hdr(skb);
2608        __sum16 *check;
2609        mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2610        mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2611        mac_iocb_ptr->net_trans_offset =
2612                cpu_to_le16(skb_network_offset(skb) |
2613                skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2614
2615        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2616        len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2617        if (likely(iph->protocol == IPPROTO_TCP)) {
2618                check = &(tcp_hdr(skb)->check);
2619                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2620                mac_iocb_ptr->total_hdrs_len =
2621                    cpu_to_le16(skb_transport_offset(skb) +
2622                                (tcp_hdr(skb)->doff << 2));
2623        } else {
2624                check = &(udp_hdr(skb)->check);
2625                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2626                mac_iocb_ptr->total_hdrs_len =
2627                    cpu_to_le16(skb_transport_offset(skb) +
2628                                sizeof(struct udphdr));
2629        }
2630        *check = ~csum_tcpudp_magic(iph->saddr,
2631                                    iph->daddr, len, iph->protocol, 0);
2632}
2633
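/* Main transmit entry point (.ndo_start_xmit).  Builds a MAC IOCB for
 * the skb, applies VLAN and TSO/checksum offload settings, maps the
 * buffers and rings the tx producer doorbell.
 */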
2634static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2635{
2636        struct tx_ring_desc *tx_ring_desc;
2637        struct ob_mac_iocb_req *mac_iocb_ptr;
2638        struct ql_adapter *qdev = netdev_priv(ndev);
2639        int tso;
2640        struct tx_ring *tx_ring;
2641        u32 tx_ring_idx = (u32) skb->queue_mapping;
2642
2643        tx_ring = &qdev->tx_ring[tx_ring_idx];
2644
2645        if (skb_padto(skb, ETH_ZLEN))
2646                return NETDEV_TX_OK;
2647
2648        if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2649                netif_info(qdev, tx_queued, qdev->ndev,
2650                           "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2651                           __func__, tx_ring_idx);
2652                netif_stop_subqueue(ndev, tx_ring->wq_id);
2653                tx_ring->tx_errors++;
2654                return NETDEV_TX_BUSY;
2655        }
2656        tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2657        mac_iocb_ptr = tx_ring_desc->queue_entry;
2658        memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2659
2660        mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2661        mac_iocb_ptr->tid = tx_ring_desc->index;
2662        /* We use the upper 32-bits to store the tx queue for this IO.
2663         * When we get the completion we can use it to establish the context.
2664         */
2665        mac_iocb_ptr->txq_idx = tx_ring_idx;
2666        tx_ring_desc->skb = skb;
2667
2668        mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2669
2670        if (skb_vlan_tag_present(skb)) {
2671                netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2672                             "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2673                mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2674                mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2675        }
2676        tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2677        if (tso < 0) {
2678                dev_kfree_skb_any(skb);
2679                return NETDEV_TX_OK;
2680        } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2681                ql_hw_csum_setup(skb,
2682                                 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2683        }
2684        if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2685                        NETDEV_TX_OK) {
2686                netif_err(qdev, tx_queued, qdev->ndev,
2687                          "Could not map the segments.\n");
2688                tx_ring->tx_errors++;
2689                return NETDEV_TX_BUSY;
2690        }
2691        QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2692        tx_ring->prod_idx++;
2693        if (tx_ring->prod_idx == tx_ring->wq_len)
2694                tx_ring->prod_idx = 0;
2695        wmb();
2696
2697        ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2698        netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2699                     "tx queued, slot %d, len %d\n",
2700                     tx_ring->prod_idx, skb->len);
2701
2702        atomic_dec(&tx_ring->tx_count);
2703
2704        if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2705                netif_stop_subqueue(ndev, tx_ring->wq_id);
2706                if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2707                        /*
2708                         * The queue got stopped because the tx_ring was full.
2709                         * Wake it up, because it's now at least 25% empty.
2710                         */
2711                        netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2712        }
2713        return NETDEV_TX_OK;
2714}
2715
2716
2717static void ql_free_shadow_space(struct ql_adapter *qdev)
2718{
2719        if (qdev->rx_ring_shadow_reg_area) {
2720                pci_free_consistent(qdev->pdev,
2721                                    PAGE_SIZE,
2722                                    qdev->rx_ring_shadow_reg_area,
2723                                    qdev->rx_ring_shadow_reg_dma);
2724                qdev->rx_ring_shadow_reg_area = NULL;
2725        }
2726        if (qdev->tx_ring_shadow_reg_area) {
2727                pci_free_consistent(qdev->pdev,
2728                                    PAGE_SIZE,
2729                                    qdev->tx_ring_shadow_reg_area,
2730                                    qdev->tx_ring_shadow_reg_dma);
2731                qdev->tx_ring_shadow_reg_area = NULL;
2732        }
2733}
2734
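/* Allocate one page each of coherent DMA memory for the rx and tx
 * shadow register areas.  The chip posts completion queue producer
 * and tx consumer indices here, and the rx area also holds the
 * lbq/sbq base address indirection lists.
 */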
2735static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2736{
2737        qdev->rx_ring_shadow_reg_area =
2738                pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2739                                      &qdev->rx_ring_shadow_reg_dma);
2740        if (qdev->rx_ring_shadow_reg_area == NULL) {
2741                netif_err(qdev, ifup, qdev->ndev,
2742                          "Allocation of RX shadow space failed.\n");
2743                return -ENOMEM;
2744        }
2745
2746        qdev->tx_ring_shadow_reg_area =
2747                pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2748                                      &qdev->tx_ring_shadow_reg_dma);
2749        if (qdev->tx_ring_shadow_reg_area == NULL) {
2750                netif_err(qdev, ifup, qdev->ndev,
2751                          "Allocation of TX shadow space failed.\n");
2752                goto err_wqp_sh_area;
2753        }
2754        return 0;
2755
2756err_wqp_sh_area:
2757        pci_free_consistent(qdev->pdev,
2758                            PAGE_SIZE,
2759                            qdev->rx_ring_shadow_reg_area,
2760                            qdev->rx_ring_shadow_reg_dma);
2761        return -ENOMEM;
2762}
2763
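/* Link each software tx descriptor to its IOCB slot in the work
 * queue and mark the entire ring as free.
 */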
2764static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2765{
2766        struct tx_ring_desc *tx_ring_desc;
2767        int i;
2768        struct ob_mac_iocb_req *mac_iocb_ptr;
2769
2770        mac_iocb_ptr = tx_ring->wq_base;
2771        tx_ring_desc = tx_ring->q;
2772        for (i = 0; i < tx_ring->wq_len; i++) {
2773                tx_ring_desc->index = i;
2774                tx_ring_desc->skb = NULL;
2775                tx_ring_desc->queue_entry = mac_iocb_ptr;
2776                mac_iocb_ptr++;
2777                tx_ring_desc++;
2778        }
2779        atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2780}
2781
2782static void ql_free_tx_resources(struct ql_adapter *qdev,
2783                                 struct tx_ring *tx_ring)
2784{
2785        if (tx_ring->wq_base) {
2786                pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2787                                    tx_ring->wq_base, tx_ring->wq_base_dma);
2788                tx_ring->wq_base = NULL;
2789        }
2790        kfree(tx_ring->q);
2791        tx_ring->q = NULL;
2792}
2793
2794static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2795                                 struct tx_ring *tx_ring)
2796{
2797        tx_ring->wq_base =
2798            pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2799                                 &tx_ring->wq_base_dma);
2800
2801        if ((tx_ring->wq_base == NULL) ||
2802            tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2803                goto pci_alloc_err;
2804
2805        tx_ring->q =
2806            kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2807                          GFP_KERNEL);
2808        if (tx_ring->q == NULL)
2809                goto err;
2810
2811        return 0;
2812err:
2813        pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2814                            tx_ring->wq_base, tx_ring->wq_base_dma);
2815        tx_ring->wq_base = NULL;
2816pci_alloc_err:
2817        netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2818        return -ENOMEM;
2819}
2820
2821static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2822{
2823        struct bq_desc *lbq_desc;
2824
2825        uint32_t  curr_idx, clean_idx;
2826
2827        curr_idx = rx_ring->lbq_curr_idx;
2828        clean_idx = rx_ring->lbq_clean_idx;
2829        while (curr_idx != clean_idx) {
2830                lbq_desc = &rx_ring->lbq[curr_idx];
2831
2832                if (lbq_desc->p.pg_chunk.last_flag) {
2833                        pci_unmap_page(qdev->pdev,
2834                                lbq_desc->p.pg_chunk.map,
2835                                ql_lbq_block_size(qdev),
2836                                       PCI_DMA_FROMDEVICE);
2837                        lbq_desc->p.pg_chunk.last_flag = 0;
2838                }
2839
2840                put_page(lbq_desc->p.pg_chunk.page);
2841                lbq_desc->p.pg_chunk.page = NULL;
2842
2843                if (++curr_idx == rx_ring->lbq_len)
2844                        curr_idx = 0;
2845
2846        }
2847        if (rx_ring->pg_chunk.page) {
2848                pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2849                        ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2850                put_page(rx_ring->pg_chunk.page);
2851                rx_ring->pg_chunk.page = NULL;
2852        }
2853}
2854
2855static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2856{
2857        int i;
2858        struct bq_desc *sbq_desc;
2859
2860        for (i = 0; i < rx_ring->sbq_len; i++) {
2861                sbq_desc = &rx_ring->sbq[i];
2862                if (sbq_desc == NULL) {
2863                        netif_err(qdev, ifup, qdev->ndev,
2864                                  "sbq_desc %d is NULL.\n", i);
2865                        return;
2866                }
2867                if (sbq_desc->p.skb) {
2868                        pci_unmap_single(qdev->pdev,
2869                                         dma_unmap_addr(sbq_desc, mapaddr),
2870                                         dma_unmap_len(sbq_desc, maplen),
2871                                         PCI_DMA_FROMDEVICE);
2872                        dev_kfree_skb(sbq_desc->p.skb);
2873                        sbq_desc->p.skb = NULL;
2874                }
2875        }
2876}
2877
2878/* Free all large and small rx buffers associated
2879 * with the completion queues for this device.
2880 */
2881static void ql_free_rx_buffers(struct ql_adapter *qdev)
2882{
2883        int i;
2884        struct rx_ring *rx_ring;
2885
2886        for (i = 0; i < qdev->rx_ring_count; i++) {
2887                rx_ring = &qdev->rx_ring[i];
2888                if (rx_ring->lbq)
2889                        ql_free_lbq_buffers(qdev, rx_ring);
2890                if (rx_ring->sbq)
2891                        ql_free_sbq_buffers(qdev, rx_ring);
2892        }
2893}
2894
2895static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2896{
2897        struct rx_ring *rx_ring;
2898        int i;
2899
2900        for (i = 0; i < qdev->rx_ring_count; i++) {
2901                rx_ring = &qdev->rx_ring[i];
2902                if (rx_ring->type != TX_Q)
2903                        ql_update_buffer_queues(qdev, rx_ring);
2904        }
2905}
2906
2907static void ql_init_lbq_ring(struct ql_adapter *qdev,
2908                                struct rx_ring *rx_ring)
2909{
2910        int i;
2911        struct bq_desc *lbq_desc;
2912        __le64 *bq = rx_ring->lbq_base;
2913
2914        memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2915        for (i = 0; i < rx_ring->lbq_len; i++) {
2916                lbq_desc = &rx_ring->lbq[i];
2917                memset(lbq_desc, 0, sizeof(*lbq_desc));
2918                lbq_desc->index = i;
2919                lbq_desc->addr = bq;
2920                bq++;
2921        }
2922}
2923
2924static void ql_init_sbq_ring(struct ql_adapter *qdev,
2925                                struct rx_ring *rx_ring)
2926{
2927        int i;
2928        struct bq_desc *sbq_desc;
2929        __le64 *bq = rx_ring->sbq_base;
2930
2931        memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2932        for (i = 0; i < rx_ring->sbq_len; i++) {
2933                sbq_desc = &rx_ring->sbq[i];
2934                memset(sbq_desc, 0, sizeof(*sbq_desc));
2935                sbq_desc->index = i;
2936                sbq_desc->addr = bq;
2937                bq++;
2938        }
2939}
2940
2941static void ql_free_rx_resources(struct ql_adapter *qdev,
2942                                 struct rx_ring *rx_ring)
2943{
2944        /* Free the small buffer queue. */
2945        if (rx_ring->sbq_base) {
2946                pci_free_consistent(qdev->pdev,
2947                                    rx_ring->sbq_size,
2948                                    rx_ring->sbq_base, rx_ring->sbq_base_dma);
2949                rx_ring->sbq_base = NULL;
2950        }
2951
2952        /* Free the small buffer queue control blocks. */
2953        kfree(rx_ring->sbq);
2954        rx_ring->sbq = NULL;
2955
2956        /* Free the large buffer queue. */
2957        if (rx_ring->lbq_base) {
2958                pci_free_consistent(qdev->pdev,
2959                                    rx_ring->lbq_size,
2960                                    rx_ring->lbq_base, rx_ring->lbq_base_dma);
2961                rx_ring->lbq_base = NULL;
2962        }
2963
2964        /* Free the large buffer queue control blocks. */
2965        kfree(rx_ring->lbq);
2966        rx_ring->lbq = NULL;
2967
2968        /* Free the rx queue. */
2969        if (rx_ring->cq_base) {
2970                pci_free_consistent(qdev->pdev,
2971                                    rx_ring->cq_size,
2972                                    rx_ring->cq_base, rx_ring->cq_base_dma);
2973                rx_ring->cq_base = NULL;
2974        }
2975}
2976
2977/* Allocate queues and buffers for this completion queue based
2978 * on the values in the parameter structure. */
2979static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2980                                 struct rx_ring *rx_ring)
2981{
2982
2983        /*
2984         * Allocate the completion queue for this rx_ring.
2985         */
2986        rx_ring->cq_base =
2987            pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2988                                 &rx_ring->cq_base_dma);
2989
2990        if (rx_ring->cq_base == NULL) {
2991                netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2992                return -ENOMEM;
2993        }
2994
2995        if (rx_ring->sbq_len) {
2996                /*
2997                 * Allocate small buffer queue.
2998                 */
2999                rx_ring->sbq_base =
3000                    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
3001                                         &rx_ring->sbq_base_dma);
3002
3003                if (rx_ring->sbq_base == NULL) {
3004                        netif_err(qdev, ifup, qdev->ndev,
3005                                  "Small buffer queue allocation failed.\n");
3006                        goto err_mem;
3007                }
3008
3009                /*
3010                 * Allocate small buffer queue control blocks.
3011                 */
3012                rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3013                                             sizeof(struct bq_desc),
3014                                             GFP_KERNEL);
3015                if (rx_ring->sbq == NULL)
3016                        goto err_mem;
3017
3018                ql_init_sbq_ring(qdev, rx_ring);
3019        }
3020
3021        if (rx_ring->lbq_len) {
3022                /*
3023                 * Allocate large buffer queue.
3024                 */
3025                rx_ring->lbq_base =
3026                    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3027                                         &rx_ring->lbq_base_dma);
3028
3029                if (rx_ring->lbq_base == NULL) {
3030                        netif_err(qdev, ifup, qdev->ndev,
3031                                  "Large buffer queue allocation failed.\n");
3032                        goto err_mem;
3033                }
3034                /*
3035                 * Allocate large buffer queue control blocks.
3036                 */
3037                rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3038                                             sizeof(struct bq_desc),
3039                                             GFP_KERNEL);
3040                if (rx_ring->lbq == NULL)
3041                        goto err_mem;
3042
3043                ql_init_lbq_ring(qdev, rx_ring);
3044        }
3045
3046        return 0;
3047
3048err_mem:
3049        ql_free_rx_resources(qdev, rx_ring);
3050        return -ENOMEM;
3051}
3052
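/* Reclaim any skbs still outstanding on the tx rings, unmapping
 * their DMA buffers first.  Called while bringing the adapter down.
 */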
3053static void ql_tx_ring_clean(struct ql_adapter *qdev)
3054{
3055        struct tx_ring *tx_ring;
3056        struct tx_ring_desc *tx_ring_desc;
3057        int i, j;
3058
3059        /*
3060         * Loop through all queues and free
3061         * any resources.
3062         */
3063        for (j = 0; j < qdev->tx_ring_count; j++) {
3064                tx_ring = &qdev->tx_ring[j];
3065                for (i = 0; i < tx_ring->wq_len; i++) {
3066                        tx_ring_desc = &tx_ring->q[i];
3067                        if (tx_ring_desc && tx_ring_desc->skb) {
3068                                netif_err(qdev, ifdown, qdev->ndev,
3069                                          "Freeing lost SKB %p, from queue %d, index %d.\n",
3070                                          tx_ring_desc->skb, j,
3071                                          tx_ring_desc->index);
3072                                ql_unmap_send(qdev, tx_ring_desc,
3073                                              tx_ring_desc->map_cnt);
3074                                dev_kfree_skb(tx_ring_desc->skb);
3075                                tx_ring_desc->skb = NULL;
3076                        }
3077                }
3078        }
3079}
3080
3081static void ql_free_mem_resources(struct ql_adapter *qdev)
3082{
3083        int i;
3084
3085        for (i = 0; i < qdev->tx_ring_count; i++)
3086                ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3087        for (i = 0; i < qdev->rx_ring_count; i++)
3088                ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3089        ql_free_shadow_space(qdev);
3090}
3091
3092static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3093{
3094        int i;
3095
3096        /* Allocate space for our shadow registers and such. */
3097        if (ql_alloc_shadow_space(qdev))
3098                return -ENOMEM;
3099
3100        for (i = 0; i < qdev->rx_ring_count; i++) {
3101                if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3102                        netif_err(qdev, ifup, qdev->ndev,
3103                                  "RX resource allocation failed.\n");
3104                        goto err_mem;
3105                }
3106        }
3107        /* Allocate tx queue resources */
3108        for (i = 0; i < qdev->tx_ring_count; i++) {
3109                if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3110                        netif_err(qdev, ifup, qdev->ndev,
3111                                  "TX resource allocation failed.\n");
3112                        goto err_mem;
3113                }
3114        }
3115        return 0;
3116
3117err_mem:
3118        ql_free_mem_resources(qdev);
3119        return -ENOMEM;
3120}
3121
3122/* Set up the rx ring control block and pass it to the chip.
3123 * The control block is defined as
3124 * "Completion Queue Initialization Control Block", or cqicb.
3125 */
3126static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3127{
3128        struct cqicb *cqicb = &rx_ring->cqicb;
3129        void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3130                (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3131        u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3132                (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3133        void __iomem *doorbell_area =
3134            qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3135        int err = 0;
3136        u16 bq_len;
3137        u64 tmp;
3138        __le64 *base_indirect_ptr;
3139        int page_entries;
3140
3141        /* Set up the shadow registers for this ring. */
3142        rx_ring->prod_idx_sh_reg = shadow_reg;
3143        rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3144        *rx_ring->prod_idx_sh_reg = 0;
3145        shadow_reg += sizeof(u64);
3146        shadow_reg_dma += sizeof(u64);
3147        rx_ring->lbq_base_indirect = shadow_reg;
3148        rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3149        shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3150        shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3151        rx_ring->sbq_base_indirect = shadow_reg;
3152        rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3153
3154        /* PCI doorbell mem area + 0x00 for consumer index register */
3155        rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3156        rx_ring->cnsmr_idx = 0;
3157        rx_ring->curr_entry = rx_ring->cq_base;
3158
3159        /* PCI doorbell mem area + 0x04 for valid register */
3160        rx_ring->valid_db_reg = doorbell_area + 0x04;
3161
3162        /* PCI doorbell mem area + 0x18 for large buffer consumer */
3163        rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3164
3165        /* PCI doorbell mem area + 0x1c */
3166        rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3167
3168        memset((void *)cqicb, 0, sizeof(struct cqicb));
3169        cqicb->msix_vect = rx_ring->irq;
3170
3171        bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3172        cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3173
3174        cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3175
3176        cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3177
3178        /*
3179         * Set up the control block load flags.
3180         */
3181        cqicb->flags = FLAGS_LC |       /* Load queue base address */
3182            FLAGS_LV |          /* Load MSI-X vector */
3183            FLAGS_LI;           /* Load irq delay values */
3184        if (rx_ring->lbq_len) {
3185                cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3186                tmp = (u64)rx_ring->lbq_base_dma;
3187                base_indirect_ptr = rx_ring->lbq_base_indirect;
3188                page_entries = 0;
3189                do {
3190                        *base_indirect_ptr = cpu_to_le64(tmp);
3191                        tmp += DB_PAGE_SIZE;
3192                        base_indirect_ptr++;
3193                        page_entries++;
3194                } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3195                cqicb->lbq_addr =
3196                    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3197                bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3198                        (u16) rx_ring->lbq_buf_size;
3199                cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3200                bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3201                        (u16) rx_ring->lbq_len;
3202                cqicb->lbq_len = cpu_to_le16(bq_len);
3203                rx_ring->lbq_prod_idx = 0;
3204                rx_ring->lbq_curr_idx = 0;
3205                rx_ring->lbq_clean_idx = 0;
3206                rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3207        }
3208        if (rx_ring->sbq_len) {
3209                cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3210                tmp = (u64)rx_ring->sbq_base_dma;
3211                base_indirect_ptr = rx_ring->sbq_base_indirect;
3212                page_entries = 0;
3213                do {
3214                        *base_indirect_ptr = cpu_to_le64(tmp);
3215                        tmp += DB_PAGE_SIZE;
3216                        base_indirect_ptr++;
3217                        page_entries++;
3218                } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3219                cqicb->sbq_addr =
3220                    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3221                cqicb->sbq_buf_size =
3222                    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3223                bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3224                        (u16) rx_ring->sbq_len;
3225                cqicb->sbq_len = cpu_to_le16(bq_len);
3226                rx_ring->sbq_prod_idx = 0;
3227                rx_ring->sbq_curr_idx = 0;
3228                rx_ring->sbq_clean_idx = 0;
3229                rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3230        }
3231        switch (rx_ring->type) {
3232        case TX_Q:
3233                cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3234                cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3235                break;
3236        case RX_Q:
3237                /* Inbound completion handling rx_rings run in
3238                 * separate NAPI contexts.
3239                 */
3240                netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3241                               64);
3242                cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3243                cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3244                break;
3245        default:
3246                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3247                             "Invalid rx_ring->type = %d.\n", rx_ring->type);
3248        }
3249        err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3250                           CFG_LCQ, rx_ring->cq_id);
3251        if (err) {
3252                netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3253                return err;
3254        }
3255        return err;
3256}
3257
3258static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3259{
3260        struct wqicb *wqicb = (struct wqicb *)tx_ring;
3261        void __iomem *doorbell_area =
3262            qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3263        void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3264            (tx_ring->wq_id * sizeof(u64));
3265        u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3266            (tx_ring->wq_id * sizeof(u64));
3267        int err = 0;
3268
3269        /*
3270         * Assign doorbell registers for this tx_ring.
3271         */
3272        /* TX PCI doorbell mem area for tx producer index */
3273        tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3274        tx_ring->prod_idx = 0;
3275        /* TX PCI doorbell mem area + 0x04 */
3276        tx_ring->valid_db_reg = doorbell_area + 0x04;
3277
3278        /*
3279         * Assign shadow registers for this tx_ring.
3280         */
3281        tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3282        tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3283
3284        wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3285        wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3286                                   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3287        wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3288        wqicb->rid = 0;
3289        wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3290
3291        wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3292
3293        ql_init_tx_ring(qdev, tx_ring);
3294
3295        err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3296                           (u16) tx_ring->wq_id);
3297        if (err) {
3298                netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3299                return err;
3300        }
3301        return err;
3302}
3303
3304static void ql_disable_msix(struct ql_adapter *qdev)
3305{
3306        if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3307                pci_disable_msix(qdev->pdev);
3308                clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3309                kfree(qdev->msi_x_entry);
3310                qdev->msi_x_entry = NULL;
3311        } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3312                pci_disable_msi(qdev->pdev);
3313                clear_bit(QL_MSI_ENABLED, &qdev->flags);
3314        }
3315}
3316
3317/* We start by trying to get the number of vectors
3318 * stored in qdev->intr_count. If fewer are available,
3319 * pci_enable_msix_range() returns a reduced count and we run with that.
3320 */
3321static void ql_enable_msix(struct ql_adapter *qdev)
3322{
3323        int i, err;
3324
3325        /* Get the MSIX vectors. */
3326        if (qlge_irq_type == MSIX_IRQ) {
3327                /* Try to alloc space for the msix struct,
3328                 * if it fails then go to MSI/legacy.
3329                 */
3330                qdev->msi_x_entry = kcalloc(qdev->intr_count,
3331                                            sizeof(struct msix_entry),
3332                                            GFP_KERNEL);
3333                if (!qdev->msi_x_entry) {
3334                        qlge_irq_type = MSI_IRQ;
3335                        goto msi;
3336                }
3337
3338                for (i = 0; i < qdev->intr_count; i++)
3339                        qdev->msi_x_entry[i].entry = i;
3340
3341                err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3342                                            1, qdev->intr_count);
3343                if (err < 0) {
3344                        kfree(qdev->msi_x_entry);
3345                        qdev->msi_x_entry = NULL;
3346                        netif_warn(qdev, ifup, qdev->ndev,
3347                                   "MSI-X Enable failed, trying MSI.\n");
3348                        qlge_irq_type = MSI_IRQ;
3349                } else {
3350                        qdev->intr_count = err;
3351                        set_bit(QL_MSIX_ENABLED, &qdev->flags);
3352                        netif_info(qdev, ifup, qdev->ndev,
3353                                   "MSI-X Enabled, got %d vectors.\n",
3354                                   qdev->intr_count);
3355                        return;
3356                }
3357        }
3358msi:
3359        qdev->intr_count = 1;
3360        if (qlge_irq_type == MSI_IRQ) {
3361                if (!pci_enable_msi(qdev->pdev)) {
3362                        set_bit(QL_MSI_ENABLED, &qdev->flags);
3363                        netif_info(qdev, ifup, qdev->ndev,
3364                                   "Running with MSI interrupts.\n");
3365                        return;
3366                }
3367        }
3368        qlge_irq_type = LEG_IRQ;
3369        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3370                     "Running with legacy interrupts.\n");
3371}
3372
3373/* Each vector services 1 RSS ring and 1 or more
3374 * TX completion rings.  This function loops through
3375 * the TX completion rings and assigns the vector that
3376 * will service it.  An example would be if there are
3377 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3378 * This would mean that vector 0 would service RSS ring 0
3379 * and TX completion rings 0,1,2 and 3.  Vector 1 would
3380 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3381 */
3382static void ql_set_tx_vect(struct ql_adapter *qdev)
3383{
3384        int i, j, vect;
3385        u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3386
3387        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3388                /* Assign irq vectors to TX rx_rings.*/
3389                for (vect = 0, j = 0, i = qdev->rss_ring_count;
3390                                         i < qdev->rx_ring_count; i++) {
3391                        if (j == tx_rings_per_vector) {
3392                                vect++;
3393                                j = 0;
3394                        }
3395                        qdev->rx_ring[i].irq = vect;
3396                        j++;
3397                }
3398        } else {
3399                /* For single vector all rings have an irq
3400                 * of zero.
3401                 */
3402                for (i = 0; i < qdev->rx_ring_count; i++)
3403                        qdev->rx_ring[i].irq = 0;
3404        }
3405}
3406
3407/* Set the interrupt mask for this vector.  Each vector
3408 * will service 1 RSS ring and 1 or more TX completion
3409 * rings.  This function sets up a bit mask per vector
3410 * that indicates which rings it services.
3411 */
3412static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3413{
3414        int j, vect = ctx->intr;
3415        u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3416
3417        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3418                /* Add the RSS ring serviced by this vector
3419                 * to the mask.
3420                 */
3421                ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3422                /* Add the TX ring(s) serviced by this vector
3423                 * to the mask. */
3424                for (j = 0; j < tx_rings_per_vector; j++) {
3425                        ctx->irq_mask |=
3426                        (1 << qdev->rx_ring[qdev->rss_ring_count +
3427                        (vect * tx_rings_per_vector) + j].cq_id);
3428                }
3429        } else {
3430                /* For single vector we just shift each queue's
3431                 * ID into the mask.
3432                 */
3433                for (j = 0; j < qdev->rx_ring_count; j++)
3434                        ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3435        }
3436}
3437
3438/*
3439 * Here we build the intr_context structures based on
3440 * our rx_ring count and intr vector count.
3441 * The intr_context structure is used to hook each vector
3442 * to possibly different handlers.
3443 */
3444static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3445{
3446        int i = 0;
3447        struct intr_context *intr_context = &qdev->intr_context[0];
3448
3449        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3450                /* Each rx_ring has its
3451                 * own intr_context since we have separate
3452                 * vectors for each queue.
3453                 */
3454                for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3455                        qdev->rx_ring[i].irq = i;
3456                        intr_context->intr = i;
3457                        intr_context->qdev = qdev;
3458                        /* Set up this vector's bit-mask that indicates
3459                         * which queues it services.
3460                         */
3461                        ql_set_irq_mask(qdev, intr_context);
3462                        /*
3463                         * We set up each vector's enable/disable/read bits so
3464                         * there are no bit/mask calculations in the critical path.
3465                         */
3466                        intr_context->intr_en_mask =
3467                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3468                            INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3469                            | i;
3470                        intr_context->intr_dis_mask =
3471                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3472                            INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3473                            INTR_EN_IHD | i;
3474                        intr_context->intr_read_mask =
3475                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3476                            INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3477                            i;
3478                        if (i == 0) {
3479                                /* The first vector/queue handles
3480                                 * broadcast/multicast, fatal errors,
3481                                 * and firmware events.  This in addition
3482                                 * to normal inbound NAPI processing.
3483                                 */
3484                                intr_context->handler = qlge_isr;
3485                                sprintf(intr_context->name, "%s-rx-%d",
3486                                        qdev->ndev->name, i);
3487                        } else {
3488                                /*
3489                                 * Inbound queues handle unicast frames only.
3490                                 */
3491                                intr_context->handler = qlge_msix_rx_isr;
3492                                sprintf(intr_context->name, "%s-rx-%d",
3493                                        qdev->ndev->name, i);
3494                        }
3495                }
3496        } else {
3497                /*
3498                 * All rx_rings use the same intr_context since
3499                 * there is only one vector.
3500                 */
3501                intr_context->intr = 0;
3502                intr_context->qdev = qdev;
3503                /*
3504                 * We set up each vector's enable/disable/read bits so
3505                 * there are no bit/mask calculations in the critical path.
3506                 */
3507                intr_context->intr_en_mask =
3508                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3509                intr_context->intr_dis_mask =
3510                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3511                    INTR_EN_TYPE_DISABLE;
3512                intr_context->intr_read_mask =
3513                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3514                /*
3515                 * Single interrupt means one handler for all rings.
3516                 */
3517                intr_context->handler = qlge_isr;
3518                sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3519                /* Set up this vector's bit-mask that indicates
3520                 * which queues it services. In this case there is
3521                 * a single vector so it will service all RSS and
3522                 * TX completion rings.
3523                 */
3524                ql_set_irq_mask(qdev, intr_context);
3525        }
3526        /* Tell the TX completion rings which MSIx vector
3527         * they will be using.
3528         */
3529        ql_set_tx_vect(qdev);
3530}
3531
3532static void ql_free_irq(struct ql_adapter *qdev)
3533{
3534        int i;
3535        struct intr_context *intr_context = &qdev->intr_context[0];
3536
3537        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3538                if (intr_context->hooked) {
3539                        if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3540                                free_irq(qdev->msi_x_entry[i].vector,
3541                                         &qdev->rx_ring[i]);
3542                        } else {
3543                                free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3544                        }
3545                }
3546        }
3547        ql_disable_msix(qdev);
3548}
3549
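/* Hook an interrupt handler for each vector.  With MSI-X each rx_ring
 * gets its own vector; with MSI or legacy interrupts a single handler
 * on rx_ring[0] services all rings.
 */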
3550static int ql_request_irq(struct ql_adapter *qdev)
3551{
3552        int i;
3553        int status = 0;
3554        struct pci_dev *pdev = qdev->pdev;
3555        struct intr_context *intr_context = &qdev->intr_context[0];
3556
3557        ql_resolve_queues_to_irqs(qdev);
3558
3559        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3560                atomic_set(&intr_context->irq_cnt, 0);
3561                if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3562                        status = request_irq(qdev->msi_x_entry[i].vector,
3563                                             intr_context->handler,
3564                                             0,
3565                                             intr_context->name,
3566                                             &qdev->rx_ring[i]);
3567                        if (status) {
3568                                netif_err(qdev, ifup, qdev->ndev,
3569                                          "Failed request for MSIX interrupt %d.\n",
3570                                          i);
3571                                goto err_irq;
3572                        }
3573                } else {
3574                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3575                                     "trying msi or legacy interrupts.\n");
3576                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3577                                     "%s: irq = %d.\n", __func__, pdev->irq);
3578                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3579                                     "%s: context->name = %s.\n", __func__,
3580                                     intr_context->name);
3581                        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3582                                     "%s: dev_id = 0x%p.\n", __func__,
3583                                     &qdev->rx_ring[0]);
3584                        status =
3585                            request_irq(pdev->irq, qlge_isr,
3586                                        test_bit(QL_MSI_ENABLED,
3587                                                 &qdev->
3588                                                 flags) ? 0 : IRQF_SHARED,
3589                                        intr_context->name, &qdev->rx_ring[0]);
3590                        if (status)
3591                                goto err_irq;
3592
3593                        netif_err(qdev, ifup, qdev->ndev,
3594                                  "Hooked intr %d, queue type %s, with name %s.\n",
3595                                  i,
3596                                  qdev->rx_ring[0].type == DEFAULT_Q ?
3597                                  "DEFAULT_Q" :
3598                                  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3599                                  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3600                                  intr_context->name);
3601                }
3602                intr_context->hooked = 1;
3603        }
3604        return status;
3605err_irq:
3606        netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3607        ql_free_irq(qdev);
3608        return status;
3609}
3610
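/* Download the RSS Initialization Control Block (RICB): the hash
 * keys and a 1024-entry indirection table that spreads inbound
 * flows across the RSS rings.
 */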
3611static int ql_start_rss(struct ql_adapter *qdev)
3612{
3613        static const u8 init_hash_seed[] = {
3614                0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3615                0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3616                0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3617                0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3618                0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3619        };
3620        struct ricb *ricb = &qdev->ricb;
3621        int status = 0;
3622        int i;
3623        u8 *hash_id = (u8 *) ricb->hash_cq_id;
3624
3625        memset((void *)ricb, 0, sizeof(*ricb));
3626
3627        ricb->base_cq = RSS_L4K;
3628        ricb->flags =
3629                (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3630        ricb->mask = cpu_to_le16((u16)(0x3ff));
3631
3632        /*
3633         * Fill out the Indirection Table.
3634         */
3635        for (i = 0; i < 1024; i++)
3636                hash_id[i] = (i & (qdev->rss_ring_count - 1));
3637
3638        memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3639        memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3640
3641        status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3642        if (status) {
3643                netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3644                return status;
3645        }
3646        return status;
3647}
3648
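/* Clear all 16 slots of the frame-to-queue routing table while
 * holding the routing index semaphore.
 */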
3649static int ql_clear_routing_entries(struct ql_adapter *qdev)
3650{
3651        int i, status = 0;
3652
3653        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3654        if (status)
3655                return status;
3656        /* Clear all the entries in the routing table. */
3657        for (i = 0; i < 16; i++) {
3658                status = ql_set_routing_reg(qdev, i, 0, 0);
3659                if (status) {
3660                        netif_err(qdev, ifup, qdev->ndev,
3661                                  "Failed to init routing register for CAM packets.\n");
3662                        break;
3663                }
3664        }
3665        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3666        return status;
3667}
3668
3669/* Initialize the frame-to-queue routing. */
3670static int ql_route_initialize(struct ql_adapter *qdev)
3671{
3672        int status = 0;
3673
3674        /* Clear all the entries in the routing table. */
3675        status = ql_clear_routing_entries(qdev);
3676        if (status)
3677                return status;
3678
3679        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3680        if (status)
3681                return status;
3682
3683        status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3684                                                RT_IDX_IP_CSUM_ERR, 1);
3685        if (status) {
3686                netif_err(qdev, ifup, qdev->ndev,
3687                        "Failed to init routing register "
3688                        "for IP CSUM error packets.\n");
3689                goto exit;
3690        }
3691        status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3692                                                RT_IDX_TU_CSUM_ERR, 1);
3693        if (status) {
3694                netif_err(qdev, ifup, qdev->ndev,
3695                        "Failed to init routing register "
3696                        "for TCP/UDP CSUM error packets.\n");
3697                goto exit;
3698        }
3699        status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3700        if (status) {
3701                netif_err(qdev, ifup, qdev->ndev,
3702                          "Failed to init routing register for broadcast packets.\n");
3703                goto exit;
3704        }
3705        /* If we have more than one inbound queue, then turn on RSS in the
3706         * routing block.
3707         */
3708        if (qdev->rss_ring_count > 1) {
3709                status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3710                                        RT_IDX_RSS_MATCH, 1);
3711                if (status) {
3712                        netif_err(qdev, ifup, qdev->ndev,
3713                                  "Failed to init routing register for MATCH RSS packets.\n");
3714                        goto exit;
3715                }
3716        }
3717
3718        status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3719                                    RT_IDX_CAM_HIT, 1);
3720        if (status)
3721                netif_err(qdev, ifup, qdev->ndev,
3722                          "Failed to init routing register for CAM packets.\n");
3723exit:
3724        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3725        return status;
3726}
3727
3728int ql_cam_route_initialize(struct ql_adapter *qdev)
3729{
3730        int status, set;
3731
3732        /* Check if the link is up and use that to
3733         * determine if we are setting or clearing
3734         * the MAC address in the CAM.
3735         */
3736        set = ql_read32(qdev, STS);
3737        set &= qdev->port_link_up;
3738        status = ql_set_mac_addr(qdev, set);
3739        if (status) {
3740                netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3741                return status;
3742        }
3743
3744        status = ql_route_initialize(qdev);
3745        if (status)
3746                netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3747
3748        return status;
3749}
3750
3751static int ql_adapter_initialize(struct ql_adapter *qdev)
3752{
3753        u32 value, mask;
3754        int i;
3755        int status = 0;
3756
3757        /*
3758         * Set up the System register to halt on errors.
3759         */
3760        value = SYS_EFE | SYS_FAE;
3761        mask = value << 16;
3762        ql_write32(qdev, SYS, mask | value);
3763
3764        /* Set the default queue, and VLAN behavior. */
3765        value = NIC_RCV_CFG_DFQ;
3766        mask = NIC_RCV_CFG_DFQ_MASK;
3767        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3768                value |= NIC_RCV_CFG_RV;
3769                mask |= (NIC_RCV_CFG_RV << 16);
3770        }
3771        ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3772
3773        /* Set the MPI interrupt to enabled. */
3774        ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3775
3776        /* Enable the function, set pagesize, enable error checking. */
3777        value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3778            FSC_EC | FSC_VM_PAGE_4K;
3779        value |= SPLT_SETTING;
3780
3781        /* Set/clear header splitting. */
3782        mask = FSC_VM_PAGESIZE_MASK |
3783            FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3784        ql_write32(qdev, FSC, mask | value);
3785
3786        ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3787
3788        /* Set RX packet routing to use the port/pci function on which
3789         * the packet arrived, in addition to the usual frame routing.
3790         * This is helpful on bonding where both interfaces can have
3791         * the same MAC address.
3792         */
3793        ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3794        /* Reroute all packets to our Interface.
3795         * They may have been routed to MPI firmware
3796         * due to WOL.
3797         */
3798        value = ql_read32(qdev, MGMT_RCV_CFG);
3799        value &= ~MGMT_RCV_CFG_RM;
3800        mask = 0xffff0000;
3801
3802        /* Sticky reg needs clearing due to WOL. */
3803        ql_write32(qdev, MGMT_RCV_CFG, mask);
3804        ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3805
3806        /* WOL is enabled by default on Mezz cards */
3807        if (qdev->pdev->subsystem_device == 0x0068 ||
3808                        qdev->pdev->subsystem_device == 0x0180)
3809                qdev->wol = WAKE_MAGIC;
3810
3811        /* Start up the rx queues. */
3812        for (i = 0; i < qdev->rx_ring_count; i++) {
3813                status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3814                if (status) {
3815                        netif_err(qdev, ifup, qdev->ndev,
3816                                  "Failed to start rx ring[%d].\n", i);
3817                        return status;
3818                }
3819        }
3820
3821        /* If there is more than one inbound completion queue
3822         * then download a RICB to configure RSS.
3823         */
3824        if (qdev->rss_ring_count > 1) {
3825                status = ql_start_rss(qdev);
3826                if (status) {
3827                        netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3828                        return status;
3829                }
3830        }
3831
3832        /* Start up the tx queues. */
3833        for (i = 0; i < qdev->tx_ring_count; i++) {
3834                status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3835                if (status) {
3836                        netif_err(qdev, ifup, qdev->ndev,
3837                                  "Failed to start tx ring[%d].\n", i);
3838                        return status;
3839                }
3840        }
3841
3842        /* Initialize the port and set the max framesize. */
3843        status = qdev->nic_ops->port_initialize(qdev);
3844        if (status)
3845                netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3846
3847        /* Set up the MAC address and frame routing filter. */
3848        status = ql_cam_route_initialize(qdev);
3849        if (status) {
3850                netif_err(qdev, ifup, qdev->ndev,
3851                          "Failed to init CAM/Routing tables.\n");
3852                return status;
3853        }
3854
3855        /* Start NAPI for the RSS queues. */
3856        for (i = 0; i < qdev->rss_ring_count; i++)
3857                napi_enable(&qdev->rx_ring[i].napi);
3858
3859        return status;
3860}
3861
3862/* Issue soft reset to chip. */
3863static int ql_adapter_reset(struct ql_adapter *qdev)
3864{
3865        u32 value;
3866        int status = 0;
3867        unsigned long end_jiffies;
3868
3869        /* Clear all the entries in the routing table. */
3870        status = ql_clear_routing_entries(qdev);
3871        if (status) {
3872                netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3873                return status;
3874        }
3875
3876        /* If the recovery bit is set, skip the mailbox command and
3877         * clear the bit; otherwise we are in the normal reset process.
3878         */
3879        if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3880                /* Stop management traffic. */
3881                ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3882
3883                /* Wait for the NIC and MGMNT FIFOs to empty. */
3884                ql_wait_fifo_empty(qdev);
3885        } else
3886                clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3887
3888        ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3889
3890        end_jiffies = jiffies + usecs_to_jiffies(30);
3891        do {
3892                value = ql_read32(qdev, RST_FO);
3893                if ((value & RST_FO_FR) == 0)
3894                        break;
3895                cpu_relax();
3896        } while (time_before(jiffies, end_jiffies));
3897
3898        if (value & RST_FO_FR) {
3899                netif_err(qdev, ifdown, qdev->ndev,
3900                          "ETIMEDOUT!!! errored out of resetting the chip!\n");
3901                status = -ETIMEDOUT;
3902        }
3903
3904        /* Resume management traffic. */
3905        ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3906        return status;
3907}
3908
3909static void ql_display_dev_info(struct net_device *ndev)
3910{
3911        struct ql_adapter *qdev = netdev_priv(ndev);
3912
3913        netif_info(qdev, probe, qdev->ndev,
3914                   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3915                   "XG Roll = %d, XG Rev = %d.\n",
3916                   qdev->func,
3917                   qdev->port,
3918                   qdev->chip_rev_id & 0x0000000f,
3919                   qdev->chip_rev_id >> 4 & 0x0000000f,
3920                   qdev->chip_rev_id >> 8 & 0x0000000f,
3921                   qdev->chip_rev_id >> 12 & 0x0000000f);
3922        netif_info(qdev, probe, qdev->ndev,
3923                   "MAC address %pM\n", ndev->dev_addr);
3924}
3925
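/* Program the MPI firmware with the Wake-on-LAN mode selected
 * through ethtool.  Only magic packet wake (WAKE_MAGIC) is supported.
 */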
3926static int ql_wol(struct ql_adapter *qdev)
3927{
3928        int status = 0;
3929        u32 wol = MB_WOL_DISABLE;
3930
3931        /* The CAM is still intact after a reset, but if we
3932         * are doing WOL, then we may need to program the
3933         * routing regs. We would also need to issue the mailbox
3934         * commands to instruct the MPI what to do per the ethtool
3935         * settings.
3936         */
3937
3938        if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3939                        WAKE_MCAST | WAKE_BCAST)) {
3940                netif_err(qdev, ifdown, qdev->ndev,
3941                          "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3942                          qdev->wol);
3943                return -EINVAL;
3944        }
3945
3946        if (qdev->wol & WAKE_MAGIC) {
3947                status = ql_mb_wol_set_magic(qdev, 1);
3948                if (status) {
3949                        netif_err(qdev, ifdown, qdev->ndev,
3950                                  "Failed to set magic packet on %s.\n",
3951                                  qdev->ndev->name);
3952                        return status;
3953                }
3954                netif_info(qdev, drv, qdev->ndev,
3955                           "Enabled magic packet successfully on %s.\n",
3956                           qdev->ndev->name);
3957
3958                wol |= MB_WOL_MAGIC_PKT;
3959        }
3960
3961        if (qdev->wol) {
3962                wol |= MB_WOL_MODE_ON;
3963                status = ql_mb_wol_mode(qdev, wol);
3964                netif_err(qdev, drv, qdev->ndev,
3965                          "WOL %s (wol code 0x%x) on %s\n",
3966                          (status == 0) ? "Successfully set" : "Failed",
3967                          wol, qdev->ndev->name);
3968        }
3969
3970        return status;
3971}
3972
3973static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3974{
3975
3976        /* Don't kill the reset worker thread if we
3977         * are in the process of recovery.
3978         */
3979        if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3980                cancel_delayed_work_sync(&qdev->asic_reset_work);
3981        cancel_delayed_work_sync(&qdev->mpi_reset_work);
3982        cancel_delayed_work_sync(&qdev->mpi_work);
3983        cancel_delayed_work_sync(&qdev->mpi_idc_work);
3984        cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3985        cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3986}
3987
3988static int ql_adapter_down(struct ql_adapter *qdev)
3989{
3990        int i, status = 0;
3991
3992        ql_link_off(qdev);
3993
3994        ql_cancel_all_work_sync(qdev);
3995
3996        for (i = 0; i < qdev->rss_ring_count; i++)
3997                napi_disable(&qdev->rx_ring[i].napi);
3998
3999        clear_bit(QL_ADAPTER_UP, &qdev->flags);
4000
4001        ql_disable_interrupts(qdev);
4002
4003        ql_tx_ring_clean(qdev);
4004
4005        /* Call netif_napi_del() from a common point. */
4007        for (i = 0; i < qdev->rss_ring_count; i++)
4008                netif_napi_del(&qdev->rx_ring[i].napi);
4009
4010        status = ql_adapter_reset(qdev);
4011        if (status)
4012                netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4013                          qdev->func);
4014        ql_free_rx_buffers(qdev);
4015
4016        return status;
4017}
4018
4019static int ql_adapter_up(struct ql_adapter *qdev)
4020{
4021        int err = 0;
4022
4023        err = ql_adapter_initialize(qdev);
4024        if (err) {
4025                netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4026                goto err_init;
4027        }
4028        set_bit(QL_ADAPTER_UP, &qdev->flags);
4029        ql_alloc_rx_buffers(qdev);
4030        /* If the port is initialized and the
4031         * link is up, then turn on the carrier.
4032         */
4033        if ((ql_read32(qdev, STS) & qdev->port_init) &&
4034                        (ql_read32(qdev, STS) & qdev->port_link_up))
4035                ql_link_on(qdev);
4036        /* Restore rx mode. */
4037        clear_bit(QL_ALLMULTI, &qdev->flags);
4038        clear_bit(QL_PROMISCUOUS, &qdev->flags);
4039        qlge_set_multicast_list(qdev->ndev);
4040
4041        /* Restore vlan setting. */
4042        qlge_restore_vlan(qdev);
4043
4044        ql_enable_interrupts(qdev);
4045        ql_enable_all_completion_interrupts(qdev);
4046        netif_tx_start_all_queues(qdev->ndev);
4047
4048        return 0;
4049err_init:
4050        ql_adapter_reset(qdev);
4051        return err;
4052}
4053
4054static void ql_release_adapter_resources(struct ql_adapter *qdev)
4055{
4056        ql_free_mem_resources(qdev);
4057        ql_free_irq(qdev);
4058}
4059
4060static int ql_get_adapter_resources(struct ql_adapter *qdev)
4061{
4062        int status = 0;
4063
4064        if (ql_alloc_mem_resources(qdev)) {
4065                netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4066                return -ENOMEM;
4067        }
4068        status = ql_request_irq(qdev);
4069        return status;
4070}
4071
4072static int qlge_close(struct net_device *ndev)
4073{
4074        struct ql_adapter *qdev = netdev_priv(ndev);
4075
4076        /* If we hit the pci_channel_io_perm_failure
4077         * condition, then we have already
4078         * brought the adapter down.
4079         */
4080        if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4081                netif_err(qdev, drv, qdev->ndev, "EEH fatal error; adapter already down.\n");
4082                clear_bit(QL_EEH_FATAL, &qdev->flags);
4083                return 0;
4084        }
4085
4086        /*
4087         * Wait for device to recover from a reset.
4088         * (Rarely happens, but possible.)
4089         */
4090        while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4091                msleep(1);
4092        ql_adapter_down(qdev);
4093        ql_release_adapter_resources(qdev);
4094        return 0;
4095}
4096
4097static int ql_configure_rings(struct ql_adapter *qdev)
4098{
4099        int i;
4100        struct rx_ring *rx_ring;
4101        struct tx_ring *tx_ring;
4102        int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4103        unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4104                LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4105
4106        qdev->lbq_buf_order = get_order(lbq_buf_len);
4107
4108        /* In a perfect world we have one RSS ring for each CPU
4109         * and each has its own vector.  To do that we ask for
4110         * cpu_cnt vectors.  ql_enable_msix() will adjust the
4111         * vector count to what we actually get.  We then
4112         * allocate an RSS ring for each.
4113         * Essentially, we are doing min(cpu_count, msix_vector_count).
4114         */
4115        qdev->intr_count = cpu_cnt;
4116        ql_enable_msix(qdev);
4117        /* Adjust the RSS ring count to the actual vector count. */
4118        qdev->rss_ring_count = qdev->intr_count;
4119        qdev->tx_ring_count = cpu_cnt;
4120        qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4121
4122        for (i = 0; i < qdev->tx_ring_count; i++) {
4123                tx_ring = &qdev->tx_ring[i];
4124                memset((void *)tx_ring, 0, sizeof(*tx_ring));
4125                tx_ring->qdev = qdev;
4126                tx_ring->wq_id = i;
4127                tx_ring->wq_len = qdev->tx_ring_size;
4128                tx_ring->wq_size =
4129                    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4130
4131                /*
4132                 * The completion queue IDs for the tx rings start
4133                 * immediately after the rss rings.
4134                 */
4135                tx_ring->cq_id = qdev->rss_ring_count + i;
4136        }
4137
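            /* The first rss_ring_count entries are inbound (RSS) queues;
             * the rest are outbound completion queues for the tx rings.
             */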
4138        for (i = 0; i < qdev->rx_ring_count; i++) {
4139                rx_ring = &qdev->rx_ring[i];
4140                memset((void *)rx_ring, 0, sizeof(*rx_ring));
4141                rx_ring->qdev = qdev;
4142                rx_ring->cq_id = i;
4143                rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4144                if (i < qdev->rss_ring_count) {
4145                        /*
4146                         * Inbound (RSS) queues.
4147                         */
4148                        rx_ring->cq_len = qdev->rx_ring_size;
4149                        rx_ring->cq_size =
4150                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4151                        rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4152                        rx_ring->lbq_size =
4153                            rx_ring->lbq_len * sizeof(__le64);
4154                        rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4155                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4156                        rx_ring->sbq_size =
4157                            rx_ring->sbq_len * sizeof(__le64);
4158                        rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4159                        rx_ring->type = RX_Q;
4160                } else {
4161                        /*
4162                         * Outbound queue handles outbound completions only.
4163                         */
4164                        /* outbound cq is same size as tx_ring it services. */
4165                        rx_ring->cq_len = qdev->tx_ring_size;
4166                        rx_ring->cq_size =
4167                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4168                        rx_ring->lbq_len = 0;
4169                        rx_ring->lbq_size = 0;
4170                        rx_ring->lbq_buf_size = 0;
4171                        rx_ring->sbq_len = 0;
4172                        rx_ring->sbq_size = 0;
4173                        rx_ring->sbq_buf_size = 0;
4174                        rx_ring->type = TX_Q;
4175                }
4176        }
4177        return 0;
4178}
4179
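    /* ndo_open: reset the chip, size the rings for the current MTU and
     * CPU count, allocate memory and IRQ resources, then bring the
     * adapter up.
     */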
4180static int qlge_open(struct net_device *ndev)
4181{
4182        int err = 0;
4183        struct ql_adapter *qdev = netdev_priv(ndev);
4184
4185        err = ql_adapter_reset(qdev);
4186        if (err)
4187                return err;
4188
4189        err = ql_configure_rings(qdev);
4190        if (err)
4191                return err;
4192
4193        err = ql_get_adapter_resources(qdev);
4194        if (err)
4195                goto error_up;
4196
4197        err = ql_adapter_up(qdev);
4198        if (err)
4199                goto error_up;
4200
4201        return err;
4202
4203error_up:
4204        ql_release_adapter_resources(qdev);
4205        return err;
4206}
4207
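    /* Resize the large receive buffers after an MTU change.  The adapter
     * is brought down and back up so the buffer queues are rebuilt with
     * the new size.
     */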
4208static int ql_change_rx_buffers(struct ql_adapter *qdev)
4209{
4210        struct rx_ring *rx_ring;
4211        int i, status;
4212        u32 lbq_buf_len;
4213
4214        /* Wait for an outstanding reset to complete. */
4215        if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4216                int retries = 4;
4217
4218                while (--retries && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4219                        netif_err(qdev, ifup, qdev->ndev,
4220                                  "Waiting for adapter UP...\n");
4221                        ssleep(1);
4222                }
4223
4224                if (!retries) {
4225                        netif_err(qdev, ifup, qdev->ndev,
4226                                  "Timed out waiting for adapter UP\n");
4227                        return -ETIMEDOUT;
4228                }
4229        }
4230
4231        status = ql_adapter_down(qdev);
4232        if (status)
4233                goto error;
4234
4235        /* Get the new rx buffer size. */
4236        lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4237                LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4238        qdev->lbq_buf_order = get_order(lbq_buf_len);
4239
4240        for (i = 0; i < qdev->rss_ring_count; i++) {
4241                rx_ring = &qdev->rx_ring[i];
4242                /* Set the new size. */
4243                rx_ring->lbq_buf_size = lbq_buf_len;
4244        }
4245
4246        status = ql_adapter_up(qdev);
4247        if (status)
4248                goto error;
4249
4250        return status;
4251error:
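            /* Set the UP bit so a subsequent qlge_close() (via dev_close)
             * does not spin waiting for the adapter to come back up.
             */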
4252        netif_alert(qdev, ifup, qdev->ndev,
4253                    "Driver up/down cycle failed, closing device.\n");
4254        set_bit(QL_ADAPTER_UP, &qdev->flags);
4255        dev_close(qdev->ndev);
4256        return status;
4257}
4258
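    /* Only MTU transitions between 1500 and 9000 are supported.  If the
     * interface is running, the receive buffers must be resized, which
     * requires a down/up cycle.
     */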
4259static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4260{
4261        struct ql_adapter *qdev = netdev_priv(ndev);
4262        int status;
4263
4264        if (ndev->mtu == 1500 && new_mtu == 9000) {
4265                netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4266        } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4267                netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4268        } else
4269                return -EINVAL;
4270
4271        queue_delayed_work(qdev->workqueue,
4272                        &qdev->mpi_port_cfg_work, 3*HZ);
4273
4274        ndev->mtu = new_mtu;
4275
4276        if (!netif_running(qdev->ndev))
4277                return 0;
4279
4280        status = ql_change_rx_buffers(qdev);
4281        if (status) {
4282                netif_err(qdev, ifup, qdev->ndev,
4283                          "Changing MTU failed.\n");
4284        }
4285
4286        return status;
4287}
4288
4289static struct net_device_stats *qlge_get_stats(struct net_device
4290                                               *ndev)
4291{
4292        struct ql_adapter *qdev = netdev_priv(ndev);
4293        struct rx_ring *rx_ring = &qdev->rx_ring[0];
4294        struct tx_ring *tx_ring = &qdev->tx_ring[0];
4295        unsigned long pkts, mcast, dropped, errors, bytes;
4296        int i;
4297
4298        /* Get RX stats. */
4299        pkts = mcast = dropped = errors = bytes = 0;
4300        for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4301                pkts += rx_ring->rx_packets;
4302                bytes += rx_ring->rx_bytes;
4303                dropped += rx_ring->rx_dropped;
4304                errors += rx_ring->rx_errors;
4305                mcast += rx_ring->rx_multicast;
4306        }
4307        ndev->stats.rx_packets = pkts;
4308        ndev->stats.rx_bytes = bytes;
4309        ndev->stats.rx_dropped = dropped;
4310        ndev->stats.rx_errors = errors;
4311        ndev->stats.multicast = mcast;
4312
4313        /* Get TX stats. */
4314        pkts = errors = bytes = 0;
4315        for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4316                pkts += tx_ring->tx_packets;
4317                bytes += tx_ring->tx_bytes;
4318                errors += tx_ring->tx_errors;
4319        }
4320        ndev->stats.tx_packets = pkts;
4321        ndev->stats.tx_bytes = bytes;
4322        ndev->stats.tx_errors = errors;
4323        return &ndev->stats;
4324}
4325
4326static void qlge_set_multicast_list(struct net_device *ndev)
4327{
4328        struct ql_adapter *qdev = netdev_priv(ndev);
4329        struct netdev_hw_addr *ha;
4330        int i, status;
4331
4332        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4333        if (status)
4334                return;
4335        /*
4336         * Set or clear promiscuous mode if a
4337         * transition is taking place.
4338         */
4339        if (ndev->flags & IFF_PROMISC) {
4340                if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4341                        if (ql_set_routing_reg
4342                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4343                                netif_err(qdev, hw, qdev->ndev,
4344                                          "Failed to set promiscuous mode.\n");
4345                        } else {
4346                                set_bit(QL_PROMISCUOUS, &qdev->flags);
4347                        }
4348                }
4349        } else {
4350                if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4351                        if (ql_set_routing_reg
4352                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4353                                netif_err(qdev, hw, qdev->ndev,
4354                                          "Failed to clear promiscuous mode.\n");
4355                        } else {
4356                                clear_bit(QL_PROMISCUOUS, &qdev->flags);
4357                        }
4358                }
4359        }
4360
4361        /*
4362         * Set or clear all multicast mode if a
4363         * transition is taking place.
4364         */
4365        if ((ndev->flags & IFF_ALLMULTI) ||
4366            (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4367                if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4368                        if (ql_set_routing_reg
4369                            (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4370                                netif_err(qdev, hw, qdev->ndev,
4371                                          "Failed to set all-multi mode.\n");
4372                        } else {
4373                                set_bit(QL_ALLMULTI, &qdev->flags);
4374                        }
4375                }
4376        } else {
4377                if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4378                        if (ql_set_routing_reg
4379                            (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4380                                netif_err(qdev, hw, qdev->ndev,
4381                                          "Failed to clear all-multi mode.\n");
4382                        } else {
4383                                clear_bit(QL_ALLMULTI, &qdev->flags);
4384                        }
4385                }
4386        }
4387
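            /* Load each multicast address into the hardware and enable
             * the multicast-match routing slot.
             */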
4388        if (!netdev_mc_empty(ndev)) {
4389                status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4390                if (status)
4391                        goto exit;
4392                i = 0;
4393                netdev_for_each_mc_addr(ha, ndev) {
4394                        if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4395                                                MAC_ADDR_TYPE_MULTI_MAC, i)) {
4396                                netif_err(qdev, hw, qdev->ndev,
4397                                          "Failed to load multicast address.\n");
4398                                ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4399                                goto exit;
4400                        }
4401                        i++;
4402                }
4403                ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4404                if (ql_set_routing_reg
4405                    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4406                        netif_err(qdev, hw, qdev->ndev,
4407                                  "Failed to set multicast match mode.\n");
4408                } else {
4409                        set_bit(QL_ALLMULTI, &qdev->flags);
4410                }
4411        }
4412exit:
4413        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4414}
4415
4416static int qlge_set_mac_address(struct net_device *ndev, void *p)
4417{
4418        struct ql_adapter *qdev = netdev_priv(ndev);
4419        struct sockaddr *addr = p;
4420        int status;
4421
4422        if (!is_valid_ether_addr(addr->sa_data))
4423                return -EADDRNOTAVAIL;
4424        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4425        /* Update local copy of current mac address. */
4426        memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4427
4428        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4429        if (status)
4430                return status;
4431        status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4432                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4433        if (status)
4434                netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4435        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4436        return status;
4437}
4438
4439static void qlge_tx_timeout(struct net_device *ndev)
4440{
4441        struct ql_adapter *qdev = netdev_priv(ndev);
4442        ql_queue_asic_error(qdev);
4443}
4444
4445static void ql_asic_reset_work(struct work_struct *work)
4446{
4447        struct ql_adapter *qdev =
4448            container_of(work, struct ql_adapter, asic_reset_work.work);
4449        int status;
4450        rtnl_lock();
4451        status = ql_adapter_down(qdev);
4452        if (status)
4453                goto error;
4454
4455        status = ql_adapter_up(qdev);
4456        if (status)
4457                goto error;
4458
4459        /* Restore rx mode. */
4460        clear_bit(QL_ALLMULTI, &qdev->flags);
4461        clear_bit(QL_PROMISCUOUS, &qdev->flags);
4462        qlge_set_multicast_list(qdev->ndev);
4463
4464        rtnl_unlock();
4465        return;
4466error:
4467        netif_alert(qdev, ifup, qdev->ndev,
4468                    "Driver up/down cycle failed, closing device\n");
4469
4470        set_bit(QL_ADAPTER_UP, &qdev->flags);
4471        dev_close(qdev->ndev);
4472        rtnl_unlock();
4473}
4474
4475static const struct nic_operations qla8012_nic_ops = {
4476        .get_flash              = ql_get_8012_flash_params,
4477        .port_initialize        = ql_8012_port_initialize,
4478};
4479
4480static const struct nic_operations qla8000_nic_ops = {
4481        .get_flash              = ql_get_8000_flash_params,
4482        .port_initialize        = ql_8000_port_initialize,
4483};
4484
4485/* Find the PCIe function number for the other NIC
4486 * on this chip.  Since both NIC functions share a
4487 * common firmware we have the lowest enabled function
4488 * do any common work.  Examples would be resetting
4489 * after a fatal firmware error, or doing a firmware
4490 * coredump.
4491 */
4492static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4493{
4494        int status = 0;
4495        u32 temp;
4496        u32 nic_func1, nic_func2;
4497
4498        status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4499                        &temp);
4500        if (status)
4501                return status;
4502
4503        nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4504                        MPI_TEST_NIC_FUNC_MASK);
4505        nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4506                        MPI_TEST_NIC_FUNC_MASK);
4507
4508        if (qdev->func == nic_func1)
4509                qdev->alt_func = nic_func2;
4510        else if (qdev->func == nic_func2)
4511                qdev->alt_func = nic_func1;
4512        else
4513                status = -EIO;
4514
4515        return status;
4516}
4517
4518static int ql_get_board_info(struct ql_adapter *qdev)
4519{
4520        int status;
4521        qdev->func =
4522            (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4523        if (qdev->func > 3)
4524                return -EIO;
4525
4526        status = ql_get_alt_pcie_func(qdev);
4527        if (status)
4528                return status;
4529
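            /* The lower-numbered of the two NIC functions is port 0;
             * the higher-numbered one is port 1.
             */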
4530        qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4531        if (qdev->port) {
4532                qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4533                qdev->port_link_up = STS_PL1;
4534                qdev->port_init = STS_PI1;
4535                qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4536                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4537        } else {
4538                qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4539                qdev->port_link_up = STS_PL0;
4540                qdev->port_init = STS_PI0;
4541                qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4542                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4543        }
4544        qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4545        qdev->device_id = qdev->pdev->device;
4546        if (qdev->device_id == QLGE_DEVICE_ID_8012)
4547                qdev->nic_ops = &qla8012_nic_ops;
4548        else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4549                qdev->nic_ops = &qla8000_nic_ops;
4550        return status;
4551}
4552
4553static void ql_release_all(struct pci_dev *pdev)
4554{
4555        struct net_device *ndev = pci_get_drvdata(pdev);
4556        struct ql_adapter *qdev = netdev_priv(ndev);
4557
4558        if (qdev->workqueue) {
4559                destroy_workqueue(qdev->workqueue);
4560                qdev->workqueue = NULL;
4561        }
4562
4563        if (qdev->reg_base)
4564                iounmap(qdev->reg_base);
4565        if (qdev->doorbell_area)
4566                iounmap(qdev->doorbell_area);
4567        vfree(qdev->mpi_coredump);
4568        pci_release_regions(pdev);
4569}
4570
4571static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4572                          int cards_found)
4573{
4574        struct ql_adapter *qdev = netdev_priv(ndev);
4575        int err = 0;
4576
4577        memset((void *)qdev, 0, sizeof(*qdev));
4578        err = pci_enable_device(pdev);
4579        if (err) {
4580                dev_err(&pdev->dev, "PCI device enable failed.\n");
4581                return err;
4582        }
4583
4584        qdev->ndev = ndev;
4585        qdev->pdev = pdev;
4586        pci_set_drvdata(pdev, ndev);
4587
4588        /* Set PCIe read request size */
4589        err = pcie_set_readrq(pdev, 4096);
4590        if (err) {
4591                dev_err(&pdev->dev, "Set readrq failed.\n");
4592                goto err_out1;
4593        }
4594
4595        err = pci_request_regions(pdev, DRV_NAME);
4596        if (err) {
4597                dev_err(&pdev->dev, "PCI region request failed.\n");
4598                goto err_out1;
4599        }
4600
4601        pci_set_master(pdev);
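            /* Prefer 64-bit DMA; fall back to a 32-bit mask if that fails. */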
4602        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4603                set_bit(QL_DMA64, &qdev->flags);
4604                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4605        } else {
4606                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4607                if (!err)
4608                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4609        }
4610
4611        if (err) {
4612                dev_err(&pdev->dev, "No usable DMA configuration.\n");
4613                goto err_out2;
4614        }
4615
4616        /* Set PCIe reset type for EEH to fundamental. */
4617        pdev->needs_freset = 1;
4618        pci_save_state(pdev);
4619        qdev->reg_base =
4620            ioremap_nocache(pci_resource_start(pdev, 1),
4621                            pci_resource_len(pdev, 1));
4622        if (!qdev->reg_base) {
4623                dev_err(&pdev->dev, "Register mapping failed.\n");
4624                err = -ENOMEM;
4625                goto err_out2;
4626        }
4627
4628        qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4629        qdev->doorbell_area =
4630            ioremap_nocache(pci_resource_start(pdev, 3),
4631                            pci_resource_len(pdev, 3));
4632        if (!qdev->doorbell_area) {
4633                dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4634                err = -ENOMEM;
4635                goto err_out2;
4636        }
4637
4638        err = ql_get_board_info(qdev);
4639        if (err) {
4640                dev_err(&pdev->dev, "Register access failed.\n");
4641                err = -EIO;
4642                goto err_out2;
4643        }
4644        qdev->msg_enable = netif_msg_init(debug, default_msg);
4645        spin_lock_init(&qdev->hw_lock);
4646        spin_lock_init(&qdev->stats_lock);
4647
4648        if (qlge_mpi_coredump) {
4649                qdev->mpi_coredump =
4650                        vmalloc(sizeof(struct ql_mpi_coredump));
4651                if (qdev->mpi_coredump == NULL) {
4652                        err = -ENOMEM;
4653                        goto err_out2;
4654                }
4655                if (qlge_force_coredump)
4656                        set_bit(QL_FRC_COREDUMP, &qdev->flags);
4657        }
4658        /* make sure the EEPROM is good */
4659        err = qdev->nic_ops->get_flash(qdev);
4660        if (err) {
4661                dev_err(&pdev->dev, "Invalid FLASH.\n");
4662                goto err_out2;
4663        }
4664
4665        /* Keep local copy of current mac address. */
4666        memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4667
4668        /* Set up the default ring sizes. */
4669        qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4670        qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4671
4672        /* Set up the coalescing parameters. */
4673        qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4674        qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4675        qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4676        qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4677
4678        /*
4679         * Set up the operating parameters.
4680         */
4681        qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4682                                                  ndev->name);
4683        if (!qdev->workqueue) {
4684                err = -ENOMEM;
4685                goto err_out2;
4686        }
4687
4688        INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4689        INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4690        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4691        INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4692        INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4693        INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4694        init_completion(&qdev->ide_completion);
4695        mutex_init(&qdev->mpi_mutex);
4696
4697        if (!cards_found) {
4698                dev_info(&pdev->dev, "%s\n", DRV_STRING);
4699                dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4700                         DRV_NAME, DRV_VERSION);
4701        }
4702        return 0;
4703err_out2:
4704        ql_release_all(pdev);
4705err_out1:
4706        pci_disable_device(pdev);
4707        return err;
4708}
4709
4710static const struct net_device_ops qlge_netdev_ops = {
4711        .ndo_open               = qlge_open,
4712        .ndo_stop               = qlge_close,
4713        .ndo_start_xmit         = qlge_send,
4714        .ndo_change_mtu         = qlge_change_mtu,
4715        .ndo_get_stats          = qlge_get_stats,
4716        .ndo_set_rx_mode        = qlge_set_multicast_list,
4717        .ndo_set_mac_address    = qlge_set_mac_address,
4718        .ndo_validate_addr      = eth_validate_addr,
4719        .ndo_tx_timeout         = qlge_tx_timeout,
4720        .ndo_set_features       = qlge_set_features,
4721        .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4722        .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4723};
4724
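    /* Deferrable timer that samples a status register every 5 seconds so
     * a dead PCI bus is noticed (and EEH triggered) even when the
     * interface is otherwise idle.
     */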
4725static void ql_timer(struct timer_list *t)
4726{
4727        struct ql_adapter *qdev = from_timer(qdev, t, timer);
4728        u32 var = 0;
4729
4730        var = ql_read32(qdev, STS);
4731        if (pci_channel_offline(qdev->pdev)) {
4732                netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4733                return;
4734        }
4735
4736        mod_timer(&qdev->timer, jiffies + (5*HZ));
4737}
4738
4739static int qlge_probe(struct pci_dev *pdev,
4740                      const struct pci_device_id *pci_entry)
4741{
4742        struct net_device *ndev = NULL;
4743        struct ql_adapter *qdev = NULL;
4744        static int cards_found = 0;
4745        int err = 0;
4746
4747        ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4748                        min(MAX_CPUS, netif_get_num_default_rss_queues()));
4749        if (!ndev)
4750                return -ENOMEM;
4751
4752        err = ql_init_device(pdev, ndev, cards_found);
4753        if (err < 0) {
4754                free_netdev(ndev);
4755                return err;
4756        }
4757
4758        qdev = netdev_priv(ndev);
4759        SET_NETDEV_DEV(ndev, &pdev->dev);
4760        ndev->hw_features = NETIF_F_SG |
4761                            NETIF_F_IP_CSUM |
4762                            NETIF_F_TSO |
4763                            NETIF_F_TSO_ECN |
4764                            NETIF_F_HW_VLAN_CTAG_TX |
4765                            NETIF_F_HW_VLAN_CTAG_RX |
4766                            NETIF_F_HW_VLAN_CTAG_FILTER |
4767                            NETIF_F_RXCSUM;
4768        ndev->features = ndev->hw_features;
4769        ndev->vlan_features = ndev->hw_features;
4770        /* vlan gets same features (except vlan filter) */
4771        ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4772                                 NETIF_F_HW_VLAN_CTAG_TX |
4773                                 NETIF_F_HW_VLAN_CTAG_RX);
4774
4775        if (test_bit(QL_DMA64, &qdev->flags))
4776                ndev->features |= NETIF_F_HIGHDMA;
4777
4778        /*
4779         * Set up net_device structure.
4780         */
4781        ndev->tx_queue_len = qdev->tx_ring_size;
4782        ndev->irq = pdev->irq;
4783
4784        ndev->netdev_ops = &qlge_netdev_ops;
4785        ndev->ethtool_ops = &qlge_ethtool_ops;
4786        ndev->watchdog_timeo = 10 * HZ;
4787
4788        /* MTU range: this driver only supports 1500 or 9000, so this only
4789         * filters out values above or below, and we'll rely on
4790         * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4791         */
4792        ndev->min_mtu = ETH_DATA_LEN;
4793        ndev->max_mtu = 9000;
4794
4795        err = register_netdev(ndev);
4796        if (err) {
4797                dev_err(&pdev->dev, "net device registration failed.\n");
4798                ql_release_all(pdev);
4799                pci_disable_device(pdev);
4800                free_netdev(ndev);
4801                return err;
4802        }
4803        /* Start up the timer to trigger EEH if
4804         * the bus goes dead
4805         */
4806        timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
4807        mod_timer(&qdev->timer, jiffies + (5*HZ));
4808        ql_link_off(qdev);
4809        ql_display_dev_info(ndev);
4810        atomic_set(&qdev->lb_count, 0);
4811        cards_found++;
4812        return 0;
4813}
4814
4815netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4816{
4817        return qlge_send(skb, ndev);
4818}
4819
4820int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4821{
4822        return ql_clean_inbound_rx_ring(rx_ring, budget);
4823}
4824
4825static void qlge_remove(struct pci_dev *pdev)
4826{
4827        struct net_device *ndev = pci_get_drvdata(pdev);
4828        struct ql_adapter *qdev = netdev_priv(ndev);
4829        del_timer_sync(&qdev->timer);
4830        ql_cancel_all_work_sync(qdev);
4831        unregister_netdev(ndev);
4832        ql_release_all(pdev);
4833        pci_disable_device(pdev);
4834        free_netdev(ndev);
4835}
4836
4837/* Clean up resources without touching hardware. */
4838static void ql_eeh_close(struct net_device *ndev)
4839{
4840        int i;
4841        struct ql_adapter *qdev = netdev_priv(ndev);
4842
4843        if (netif_carrier_ok(ndev)) {
4844                netif_carrier_off(ndev);
4845                netif_stop_queue(ndev);
4846        }
4847
4848        /* Cancel outstanding work; the caller already stopped the timer. */
4849        ql_cancel_all_work_sync(qdev);
4850
4851        for (i = 0; i < qdev->rss_ring_count; i++)
4852                netif_napi_del(&qdev->rx_ring[i].napi);
4853
4854        clear_bit(QL_ADAPTER_UP, &qdev->flags);
4855        ql_tx_ring_clean(qdev);
4856        ql_free_rx_buffers(qdev);
4857        ql_release_adapter_resources(qdev);
4858}
4859
4860/*
4861 * This callback is called by the PCI subsystem whenever
4862 * a PCI bus error is detected.
4863 */
4864static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4865                                               enum pci_channel_state state)
4866{
4867        struct net_device *ndev = pci_get_drvdata(pdev);
4868        struct ql_adapter *qdev = netdev_priv(ndev);
4869
4870        switch (state) {
4871        case pci_channel_io_normal:
4872                return PCI_ERS_RESULT_CAN_RECOVER;
4873        case pci_channel_io_frozen:
4874                netif_device_detach(ndev);
4875                del_timer_sync(&qdev->timer);
4876                if (netif_running(ndev))
4877                        ql_eeh_close(ndev);
4878                pci_disable_device(pdev);
4879                return PCI_ERS_RESULT_NEED_RESET;
4880        case pci_channel_io_perm_failure:
4881                dev_err(&pdev->dev,
4882                        "%s: pci_channel_io_perm_failure.\n", __func__);
4883                del_timer_sync(&qdev->timer);
4884                ql_eeh_close(ndev);
4885                set_bit(QL_EEH_FATAL, &qdev->flags);
4886                return PCI_ERS_RESULT_DISCONNECT;
4887        }
4888
4889        /* Request a slot reset. */
4890        return PCI_ERS_RESULT_NEED_RESET;
4891}
4892
4893/*
4894 * This callback is called after the PCI bus has been reset.
4895 * Basically, this tries to restart the card from scratch.
4896 * This is a shortened version of the device probe/discovery code;
4897 * it resembles the first half of the qlge_probe() routine.
4898 */
4899static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4900{
4901        struct net_device *ndev = pci_get_drvdata(pdev);
4902        struct ql_adapter *qdev = netdev_priv(ndev);
4903
4904        pdev->error_state = pci_channel_io_normal;
4905
4906        pci_restore_state(pdev);
4907        if (pci_enable_device(pdev)) {
4908                netif_err(qdev, ifup, qdev->ndev,
4909                          "Cannot re-enable PCI device after reset.\n");
4910                return PCI_ERS_RESULT_DISCONNECT;
4911        }
4912        pci_set_master(pdev);
4913
4914        if (ql_adapter_reset(qdev)) {
4915                netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4916                set_bit(QL_EEH_FATAL, &qdev->flags);
4917                return PCI_ERS_RESULT_DISCONNECT;
4918        }
4919
4920        return PCI_ERS_RESULT_RECOVERED;
4921}
4922
4923static void qlge_io_resume(struct pci_dev *pdev)
4924{
4925        struct net_device *ndev = pci_get_drvdata(pdev);
4926        struct ql_adapter *qdev = netdev_priv(ndev);
4927        int err = 0;
4928
4929        if (netif_running(ndev)) {
4930                err = qlge_open(ndev);
4931                if (err) {
4932                        netif_err(qdev, ifup, qdev->ndev,
4933                                  "Device initialization failed after reset.\n");
4934                        return;
4935                }
4936        } else {
4937                netif_err(qdev, ifup, qdev->ndev,
4938                          "Device was not running prior to EEH.\n");
4939        }
4940        mod_timer(&qdev->timer, jiffies + (5*HZ));
4941        netif_device_attach(ndev);
4942}
4943
4944static const struct pci_error_handlers qlge_err_handler = {
4945        .error_detected = qlge_io_error_detected,
4946        .slot_reset = qlge_io_slot_reset,
4947        .resume = qlge_io_resume,
4948};
4949
4950static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4951{
4952        struct net_device *ndev = pci_get_drvdata(pdev);
4953        struct ql_adapter *qdev = netdev_priv(ndev);
4954        int err;
4955
4956        netif_device_detach(ndev);
4957        del_timer_sync(&qdev->timer);
4958
4959        if (netif_running(ndev)) {
4960                err = ql_adapter_down(qdev);
4961                if (err)
4962                        return err;
4963        }
4964
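            /* Arm wake-on-LAN per the current ethtool settings before
             * powering the device down.
             */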
4965        ql_wol(qdev);
4966        err = pci_save_state(pdev);
4967        if (err)
4968                return err;
4969
4970        pci_disable_device(pdev);
4971
4972        pci_set_power_state(pdev, pci_choose_state(pdev, state));
4973
4974        return 0;
4975}
4976
4977#ifdef CONFIG_PM
4978static int qlge_resume(struct pci_dev *pdev)
4979{
4980        struct net_device *ndev = pci_get_drvdata(pdev);
4981        struct ql_adapter *qdev = netdev_priv(ndev);
4982        int err;
4983
4984        pci_set_power_state(pdev, PCI_D0);
4985        pci_restore_state(pdev);
4986        err = pci_enable_device(pdev);
4987        if (err) {
4988                netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4989                return err;
4990        }
4991        pci_set_master(pdev);
4992
4993        pci_enable_wake(pdev, PCI_D3hot, 0);
4994        pci_enable_wake(pdev, PCI_D3cold, 0);
4995
4996        if (netif_running(ndev)) {
4997                err = ql_adapter_up(qdev);
4998                if (err)
4999                        return err;
5000        }
5001
5002        mod_timer(&qdev->timer, jiffies + (5*HZ));
5003        netif_device_attach(ndev);
5004
5005        return 0;
5006}
5007#endif /* CONFIG_PM */
5008
5009static void qlge_shutdown(struct pci_dev *pdev)
5010{
5011        qlge_suspend(pdev, PMSG_SUSPEND);
5012}
5013
5014static struct pci_driver qlge_driver = {
5015        .name = DRV_NAME,
5016        .id_table = qlge_pci_tbl,
5017        .probe = qlge_probe,
5018        .remove = qlge_remove,
5019#ifdef CONFIG_PM
5020        .suspend = qlge_suspend,
5021        .resume = qlge_resume,
5022#endif
5023        .shutdown = qlge_shutdown,
5024        .err_handler = &qlge_err_handler
5025};
5026
5027module_pci_driver(qlge_driver);
5028