linux/drivers/net/ethernet/cisco/enic/enic_main.c
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"

#define ENIC_NOTIFY_TIMER_PERIOD        (2 * HZ)
#define WQ_ENET_MAX_DESC_LEN            (1 << WQ_ENET_LEN_BITS)
#define MAX_TSO                         (1 << 16)
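/* Worst-case number of descriptors one send can be split into: a maximal
 * (64 KB) TSO payload carved into WQ_ENET_MAX_DESC_LEN-sized pieces, plus
 * one for the remainder.
 */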
#define ENIC_DESC_MAX_SPLITS            (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
        { 0, }  /* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
        char name[ETH_GSTRING_LEN];
        unsigned int offset;
};

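/* Stat offsets are stored in units of u64 (8 bytes), so they can be used
 * directly to index the u64 counters in struct vnic_stats; see
 * enic_get_ethtool_stats().
 */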
#define ENIC_TX_STAT(stat)      \
        { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)      \
        { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }

static const struct enic_stat enic_tx_stats[] = {
        ENIC_TX_STAT(tx_frames_ok),
        ENIC_TX_STAT(tx_unicast_frames_ok),
        ENIC_TX_STAT(tx_multicast_frames_ok),
        ENIC_TX_STAT(tx_broadcast_frames_ok),
        ENIC_TX_STAT(tx_bytes_ok),
        ENIC_TX_STAT(tx_unicast_bytes_ok),
        ENIC_TX_STAT(tx_multicast_bytes_ok),
        ENIC_TX_STAT(tx_broadcast_bytes_ok),
        ENIC_TX_STAT(tx_drops),
        ENIC_TX_STAT(tx_errors),
        ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
        ENIC_RX_STAT(rx_frames_ok),
        ENIC_RX_STAT(rx_frames_total),
        ENIC_RX_STAT(rx_unicast_frames_ok),
        ENIC_RX_STAT(rx_multicast_frames_ok),
        ENIC_RX_STAT(rx_broadcast_frames_ok),
        ENIC_RX_STAT(rx_bytes_ok),
        ENIC_RX_STAT(rx_unicast_bytes_ok),
        ENIC_RX_STAT(rx_multicast_bytes_ok),
        ENIC_RX_STAT(rx_broadcast_bytes_ok),
        ENIC_RX_STAT(rx_drop),
        ENIC_RX_STAT(rx_no_bufs),
        ENIC_RX_STAT(rx_errors),
        ENIC_RX_STAT(rx_rss),
        ENIC_RX_STAT(rx_crc_errors),
        ENIC_RX_STAT(rx_frames_64),
        ENIC_RX_STAT(rx_frames_127),
        ENIC_RX_STAT(rx_frames_255),
        ENIC_RX_STAT(rx_frames_511),
        ENIC_RX_STAT(rx_frames_1023),
        ENIC_RX_STAT(rx_frames_1518),
        ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

int enic_is_dynamic(struct enic *enic)
{
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
        return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
        return vf >= 0 && vf < enic->num_vfs;
#else
        return 0;
#endif
}

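/* Completion queues are laid out with the RQ CQs first, followed by the
 * WQ CQs.  In MSI-X mode the interrupt vectors follow the same layout,
 * with two extra vectors at the end for error and notify events.
 */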
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
        return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
        return enic->rq_count + wq;
}

static inline unsigned int enic_legacy_io_intr(void)
{
        return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
        return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
        return 2;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
        return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
        return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
        return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
        return enic->rq_count + enic->wq_count + 1;
}

static int enic_get_settings(struct net_device *netdev,
        struct ethtool_cmd *ecmd)
{
        struct enic *enic = netdev_priv(netdev);

        ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
        ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
        ecmd->port = PORT_FIBRE;
        ecmd->transceiver = XCVR_EXTERNAL;

        if (netif_carrier_ok(netdev)) {
                ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
                ecmd->duplex = DUPLEX_FULL;
        } else {
                ethtool_cmd_speed_set(ecmd, -1);
                ecmd->duplex = -1;
        }

        ecmd->autoneg = AUTONEG_DISABLE;

        return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
        struct ethtool_drvinfo *drvinfo)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_devcmd_fw_info *fw_info;

        enic_dev_fw_info(enic, &fw_info);

        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, fw_info->fw_version,
                sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
                sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
        unsigned int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < enic_n_tx_stats; i++) {
                        memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                for (i = 0; i < enic_n_rx_stats; i++) {
                        memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                break;
        }
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return enic_n_tx_stats + enic_n_rx_stats;
        default:
                return -EOPNOTSUPP;
        }
}

static void enic_get_ethtool_stats(struct net_device *netdev,
        struct ethtool_stats *stats, u64 *data)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *vstats;
        unsigned int i;

        enic_dev_stats_dump(enic, &vstats);

        for (i = 0; i < enic_n_tx_stats; i++)
                *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
        for (i = 0; i < enic_n_rx_stats; i++)
                *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
        struct enic *enic = netdev_priv(netdev);
        enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
        struct ethtool_coalesce *ecmd)
{
        struct enic *enic = netdev_priv(netdev);

        ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
        ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

        return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
        struct ethtool_coalesce *ecmd)
{
        struct enic *enic = netdev_priv(netdev);
        u32 tx_coalesce_usecs;
        u32 rx_coalesce_usecs;
        unsigned int i, intr;

        tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
                vnic_dev_get_intr_coal_timer_max(enic->vdev));
        rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
                vnic_dev_get_intr_coal_timer_max(enic->vdev));

        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                if (tx_coalesce_usecs != rx_coalesce_usecs)
                        return -EINVAL;

                intr = enic_legacy_io_intr();
                vnic_intr_coalescing_timer_set(&enic->intr[intr],
                        tx_coalesce_usecs);
                break;
        case VNIC_DEV_INTR_MODE_MSI:
                if (tx_coalesce_usecs != rx_coalesce_usecs)
                        return -EINVAL;

                vnic_intr_coalescing_timer_set(&enic->intr[0],
                        tx_coalesce_usecs);
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
                        vnic_intr_coalescing_timer_set(&enic->intr[intr],
                                tx_coalesce_usecs);
                }

                for (i = 0; i < enic->rq_count; i++) {
                        intr = enic_msix_rq_intr(enic, i);
                        vnic_intr_coalescing_timer_set(&enic->intr[intr],
                                rx_coalesce_usecs);
                }

                break;
        default:
                break;
        }

        enic->tx_coalesce_usecs = tx_coalesce_usecs;
        enic->rx_coalesce_usecs = rx_coalesce_usecs;

        return 0;
}

static const struct ethtool_ops enic_ethtool_ops = {
        .get_settings = enic_get_settings,
        .get_drvinfo = enic_get_drvinfo,
        .get_msglevel = enic_get_msglevel,
        .set_msglevel = enic_set_msglevel,
        .get_link = ethtool_op_get_link,
        .get_strings = enic_get_strings,
        .get_sset_count = enic_get_sset_count,
        .get_ethtool_stats = enic_get_ethtool_stats,
        .get_coalesce = enic_get_coalesce,
        .set_coalesce = enic_set_coalesce,
};
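
/* These ops back the standard ethtool interfaces, e.g.:
 *   ethtool -i ethN             -> enic_get_drvinfo()
 *   ethtool -S ethN             -> enic_get_ethtool_stats()
 *   ethtool -C ethN rx-usecs 30 -> enic_set_coalesce()
 */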

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
        struct enic *enic = vnic_dev_priv(wq->vdev);

        if (buf->sop)
                pci_unmap_single(enic->pdev, buf->dma_addr,
                        buf->len, PCI_DMA_TODEVICE);
        else
                pci_unmap_page(enic->pdev, buf->dma_addr,
                        buf->len, PCI_DMA_TODEVICE);

        if (buf->os_buf)
                dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
        struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
        enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
        u8 type, u16 q_number, u16 completed_index, void *opaque)
{
        struct enic *enic = vnic_dev_priv(vdev);

        spin_lock(&enic->wq_lock[q_number]);

        vnic_wq_service(&enic->wq[q_number], cq_desc,
                completed_index, enic_wq_free_buf,
                opaque);

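        /* Wake the queue once enough descriptors are free to hold a
         * maximally fragmented skb plus its worst-case TSO splits; see
         * the matching stop in enic_hard_start_xmit().
         */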
        if (netif_queue_stopped(enic->netdev) &&
            vnic_wq_desc_avail(&enic->wq[q_number]) >=
            (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
                netif_wake_queue(enic->netdev);

        spin_unlock(&enic->wq_lock[q_number]);

        return 0;
}

static void enic_log_q_error(struct enic *enic)
{
        unsigned int i;
        u32 error_status;

        for (i = 0; i < enic->wq_count; i++) {
                error_status = vnic_wq_error_status(&enic->wq[i]);
                if (error_status)
                        netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
                                i, error_status);
        }

        for (i = 0; i < enic->rq_count; i++) {
                error_status = vnic_rq_error_status(&enic->rq[i]);
                if (error_status)
                        netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
                                i, error_status);
        }
}

static void enic_msglvl_check(struct enic *enic)
{
        u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

        if (msg_enable != enic->msg_enable) {
                netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
                        enic->msg_enable, msg_enable);
                enic->msg_enable = msg_enable;
        }
}

static void enic_mtu_check(struct enic *enic)
{
        u32 mtu = vnic_dev_mtu(enic->vdev);
        struct net_device *netdev = enic->netdev;

        if (mtu && mtu != enic->port_mtu) {
                enic->port_mtu = mtu;
                if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
                        mtu = max_t(int, ENIC_MIN_MTU,
                                min_t(int, ENIC_MAX_MTU, mtu));
                        if (mtu != netdev->mtu)
                                schedule_work(&enic->change_mtu_work);
                } else {
                        if (mtu < netdev->mtu)
                                netdev_warn(netdev,
                                        "interface MTU (%d) set higher "
                                        "than switch port MTU (%d)\n",
                                        netdev->mtu, mtu);
                }
        }
}

static void enic_link_check(struct enic *enic)
{
        int link_status = vnic_dev_link_status(enic->vdev);
        int carrier_ok = netif_carrier_ok(enic->netdev);

        if (link_status && !carrier_ok) {
                netdev_info(enic->netdev, "Link UP\n");
                netif_carrier_on(enic->netdev);
        } else if (!link_status && carrier_ok) {
                netdev_info(enic->netdev, "Link DOWN\n");
                netif_carrier_off(enic->netdev);
        }
}

static void enic_notify_check(struct enic *enic)
{
        enic_msglvl_check(enic);
        enic_mtu_check(enic);
        enic_link_check(enic);
}

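/* Test whether interrupt source i is asserted in the legacy
 * pending-bits array read from the device.
 */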
#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
        struct net_device *netdev = data;
        struct enic *enic = netdev_priv(netdev);
        unsigned int io_intr = enic_legacy_io_intr();
        unsigned int err_intr = enic_legacy_err_intr();
        unsigned int notify_intr = enic_legacy_notify_intr();
        u32 pba;

        vnic_intr_mask(&enic->intr[io_intr]);

        pba = vnic_intr_legacy_pba(enic->legacy_pba);
        if (!pba) {
                vnic_intr_unmask(&enic->intr[io_intr]);
                return IRQ_NONE;        /* not our interrupt */
        }

        if (ENIC_TEST_INTR(pba, notify_intr)) {
                vnic_intr_return_all_credits(&enic->intr[notify_intr]);
                enic_notify_check(enic);
        }

        if (ENIC_TEST_INTR(pba, err_intr)) {
                vnic_intr_return_all_credits(&enic->intr[err_intr]);
                enic_log_q_error(enic);
                /* schedule recovery from WQ/RQ error */
                schedule_work(&enic->reset);
                return IRQ_HANDLED;
        }

        if (ENIC_TEST_INTR(pba, io_intr)) {
                if (napi_schedule_prep(&enic->napi[0]))
                        __napi_schedule(&enic->napi[0]);
        } else {
                vnic_intr_unmask(&enic->intr[io_intr]);
        }

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
        struct enic *enic = data;

        /* With MSI, there is no sharing of interrupts, so this is
         * our interrupt and there is no need to ack it.  The device
         * is not providing per-vector masking, so the OS will not
         * write to PCI config space to mask/unmask the interrupt.
         * We're using mask_on_assertion for MSI, so the device
         * automatically masks the interrupt when the interrupt is
         * generated.  Later, when exiting polling, the interrupt
         * will be unmasked (see enic_poll).
         *
         * Also, the device uses the same PCIe Traffic Class (TC)
         * for Memory Write data and MSI, so there are no ordering
         * issues; the MSI will always arrive at the Root Complex
         * _after_ corresponding Memory Writes (i.e. descriptor
         * writes).
         */

        napi_schedule(&enic->napi[0]);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
        struct napi_struct *napi = data;

        /* schedule NAPI polling for RQ cleanup */
        napi_schedule(napi);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int cq = enic_cq_wq(enic, 0);
        unsigned int intr = enic_msix_wq_intr(enic, 0);
        unsigned int wq_work_to_do = -1; /* no limit */
        unsigned int wq_work_done;

        wq_work_done = vnic_cq_service(&enic->cq[cq],
                wq_work_to_do, enic_wq_service, NULL);

        vnic_intr_return_credits(&enic->intr[intr],
                wq_work_done,
                1 /* unmask intr */,
                1 /* reset intr timer */);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int intr = enic_msix_err_intr(enic);

        vnic_intr_return_all_credits(&enic->intr[intr]);

        enic_log_q_error(enic);

        /* schedule recovery from WQ/RQ error */
        schedule_work(&enic->reset);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int intr = enic_msix_notify_intr(enic);

        vnic_intr_return_all_credits(&enic->intr[intr]);
        enic_notify_check(enic);

        return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb,
        unsigned int len_left, int loopback)
{
        const skb_frag_t *frag;

        /* Queue additional data fragments */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= skb_frag_size(frag);
                enic_queue_wq_desc_cont(wq, skb,
                        skb_frag_dma_map(&enic->pdev->dev,
                                         frag, 0, skb_frag_size(frag),
                                         DMA_TO_DEVICE),
                        skb_frag_size(frag),
                        (len_left == 0),        /* EOP? */
                        loopback);
        }
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb,
        int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        int eop = (len_left == 0);

        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
        enic_queue_wq_desc(wq, skb,
                pci_map_single(enic->pdev, skb->data,
                        head_len, PCI_DMA_TODEVICE),
                head_len,
                vlan_tag_insert, vlan_tag,
                eop, loopback);

        if (!eop)
                enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb,
        int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        unsigned int hdr_len = skb_checksum_start_offset(skb);
        unsigned int csum_offset = hdr_len + skb->csum_offset;
        int eop = (len_left == 0);

        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
        enic_queue_wq_desc_csum_l4(wq, skb,
                pci_map_single(enic->pdev, skb->data,
                        head_len, PCI_DMA_TODEVICE),
                head_len,
                csum_offset,
                hdr_len,
                vlan_tag_insert, vlan_tag,
                eop, loopback);

        if (!eop)
                enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
        int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
        unsigned int frag_len_left = skb_headlen(skb);
        unsigned int len_left = skb->len - frag_len_left;
        unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int eop = (len_left == 0);
        unsigned int len;
        dma_addr_t dma_addr;
        unsigned int offset = 0;
        skb_frag_t *frag;

        /* Preload TCP csum field with IP pseudo hdr calculated
         * with IP length set to zero.  HW will later add in length
         * to each TCP segment resulting from the TSO.
         */

        if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
                ip_hdr(skb)->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                        ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                        &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        }

        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for the main skb fragment
         */
        while (frag_len_left) {
                len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
                dma_addr = pci_map_single(enic->pdev, skb->data + offset,
                                len, PCI_DMA_TODEVICE);
                enic_queue_wq_desc_tso(wq, skb,
                        dma_addr,
                        len,
                        mss, hdr_len,
                        vlan_tag_insert, vlan_tag,
                        eop && (len == frag_len_left), loopback);
                frag_len_left -= len;
                offset += len;
        }

        if (eop)
                return;

        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for additional data fragments
         */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= skb_frag_size(frag);
                frag_len_left = skb_frag_size(frag);
                offset = 0;

                while (frag_len_left) {
                        len = min(frag_len_left,
                                (unsigned int)WQ_ENET_MAX_DESC_LEN);
                        dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
                                                    offset, len,
                                                    DMA_TO_DEVICE);
                        enic_queue_wq_desc_cont(wq, skb,
                                dma_addr,
                                len,
                                (len_left == 0) &&
                                (len == frag_len_left),         /* EOP? */
                                loopback);
                        frag_len_left -= len;
                        offset += len;
                }
        }
}

static inline void enic_queue_wq_skb(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb)
{
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int vlan_tag = 0;
        int vlan_tag_insert = 0;
        int loopback = 0;

        if (vlan_tx_tag_present(skb)) {
                /* VLAN tag from trunking driver */
                vlan_tag_insert = 1;
                vlan_tag = vlan_tx_tag_get(skb);
        } else if (enic->loop_enable) {
                vlan_tag = enic->loop_tag;
                loopback = 1;
        }

        if (mss)
                enic_queue_wq_skb_tso(enic, wq, skb, mss,
                        vlan_tag_insert, vlan_tag, loopback);
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                enic_queue_wq_skb_csum_l4(enic, wq, skb,
                        vlan_tag_insert, vlan_tag, loopback);
        else
                enic_queue_wq_skb_vlan(enic, wq, skb,
                        vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
        struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_wq *wq = &enic->wq[0];
        unsigned long flags;

        if (skb->len <= 0) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
         * which is very likely.  In the off chance it's going to take
         * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
         */

        if (skb_shinfo(skb)->gso_size == 0 &&
            skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
            skb_linearize(skb)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        spin_lock_irqsave(&enic->wq_lock[0], flags);

        if (vnic_wq_desc_avail(wq) <
            skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
                netif_stop_queue(netdev);
                /* This is a hard error, log it */
                netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
                spin_unlock_irqrestore(&enic->wq_lock[0], flags);
                return NETDEV_TX_BUSY;
        }

        enic_queue_wq_skb(enic, wq, skb);

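        /* Stop the queue if the next send could not be guaranteed a
         * descriptor for every fragment plus its worst-case TSO splits;
         * enic_wq_service() wakes it once completions free up room.
         */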
        if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
                netif_stop_queue(netdev);

        spin_unlock_irqrestore(&enic->wq_lock[0], flags);

        return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
                                                struct rtnl_link_stats64 *net_stats)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *stats;

        enic_dev_stats_dump(enic, &stats);

        net_stats->tx_packets = stats->tx.tx_frames_ok;
        net_stats->tx_bytes = stats->tx.tx_bytes_ok;
        net_stats->tx_errors = stats->tx.tx_errors;
        net_stats->tx_dropped = stats->tx.tx_drops;

        net_stats->rx_packets = stats->rx.rx_frames_ok;
        net_stats->rx_bytes = stats->rx.rx_bytes_ok;
        net_stats->rx_errors = stats->rx.rx_errors;
        net_stats->multicast = stats->rx.rx_multicast_frames_ok;
        net_stats->rx_over_errors = enic->rq_truncated_pkts;
        net_stats->rx_crc_errors = enic->rq_bad_fcs;
        net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

        return net_stats;
}

void enic_reset_addr_lists(struct enic *enic)
{
        enic->mc_count = 0;
        enic->uc_count = 0;
        enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
        struct enic *enic = netdev_priv(netdev);

        if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
                if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
                        return -EADDRNOTAVAIL;
        } else {
                if (!is_valid_ether_addr(addr))
                        return -EADDRNOTAVAIL;
        }

        memcpy(netdev->dev_addr, addr, netdev->addr_len);
        netdev->addr_assign_type &= ~NET_ADDR_RANDOM;

        return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
        struct enic *enic = netdev_priv(netdev);
        struct sockaddr *saddr = p;
        char *addr = saddr->sa_data;
        int err;

        if (netif_running(enic->netdev)) {
                err = enic_dev_del_station_addr(enic);
                if (err)
                        return err;
        }

        err = enic_set_mac_addr(netdev, addr);
        if (err)
                return err;

        if (netif_running(enic->netdev)) {
                err = enic_dev_add_station_addr(enic);
                if (err)
                        return err;
        }

        return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
        struct sockaddr *saddr = p;
        char *addr = saddr->sa_data;
        struct enic *enic = netdev_priv(netdev);
        int err;

        err = enic_dev_del_station_addr(enic);
        if (err)
                return err;

        err = enic_set_mac_addr(netdev, addr);
        if (err)
                return err;

        return enic_dev_add_station_addr(enic);
}

static void enic_update_multicast_addr_list(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;
        struct netdev_hw_addr *ha;
        unsigned int mc_count = netdev_mc_count(netdev);
        u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
        unsigned int i, j;

        if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
                netdev_warn(netdev, "Registering only %d out of %d "
                        "multicast addresses\n",
                        ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
                mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
        }

        /* Is there an easier way?  Trying to minimize the
         * calls to add/del multicast addrs.  We keep the
         * addrs from the last call in enic->mc_addr and
         * look for changes to add/del.
         */

        i = 0;
        netdev_for_each_mc_addr(ha, netdev) {
                if (i == mc_count)
                        break;
                memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
        }

        for (i = 0; i < enic->mc_count; i++) {
                for (j = 0; j < mc_count; j++)
                        if (compare_ether_addr(enic->mc_addr[i],
                                mc_addr[j]) == 0)
                                break;
                if (j == mc_count)
                        enic_dev_del_addr(enic, enic->mc_addr[i]);
        }

        for (i = 0; i < mc_count; i++) {
                for (j = 0; j < enic->mc_count; j++)
                        if (compare_ether_addr(mc_addr[i],
                                enic->mc_addr[j]) == 0)
                                break;
                if (j == enic->mc_count)
                        enic_dev_add_addr(enic, mc_addr[i]);
        }

        /* Save the list to compare against next time
         */

        for (i = 0; i < mc_count; i++)
                memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

        enic->mc_count = mc_count;
}

static void enic_update_unicast_addr_list(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;
        struct netdev_hw_addr *ha;
        unsigned int uc_count = netdev_uc_count(netdev);
        u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
        unsigned int i, j;

        if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
                netdev_warn(netdev, "Registering only %d out of %d "
                        "unicast addresses\n",
                        ENIC_UNICAST_PERFECT_FILTERS, uc_count);
                uc_count = ENIC_UNICAST_PERFECT_FILTERS;
        }

        /* Is there an easier way?  Trying to minimize the
         * calls to add/del unicast addrs.  We keep the
         * addrs from the last call in enic->uc_addr and
         * look for changes to add/del.
         */

        i = 0;
        netdev_for_each_uc_addr(ha, netdev) {
                if (i == uc_count)
                        break;
                memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
        }

        for (i = 0; i < enic->uc_count; i++) {
                for (j = 0; j < uc_count; j++)
                        if (compare_ether_addr(enic->uc_addr[i],
                                uc_addr[j]) == 0)
                                break;
                if (j == uc_count)
                        enic_dev_del_addr(enic, enic->uc_addr[i]);
        }

        for (i = 0; i < uc_count; i++) {
                for (j = 0; j < enic->uc_count; j++)
                        if (compare_ether_addr(uc_addr[i],
                                enic->uc_addr[j]) == 0)
                                break;
                if (j == enic->uc_count)
                        enic_dev_add_addr(enic, uc_addr[i]);
        }

        /* Save the list to compare against next time
         */

        for (i = 0; i < uc_count; i++)
                memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

        enic->uc_count = uc_count;
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        int directed = 1;
        int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
        int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
        int promisc = (netdev->flags & IFF_PROMISC) ||
                netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
        int allmulti = (netdev->flags & IFF_ALLMULTI) ||
                netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
        unsigned int flags = netdev->flags |
                (allmulti ? IFF_ALLMULTI : 0) |
                (promisc ? IFF_PROMISC : 0);

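        /* Fold the derived promisc/allmulti state into the flags word so
         * the packet filter devcmd is only issued when the effective
         * filter mode actually changes.
         */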
        if (enic->flags != flags) {
                enic->flags = flags;
                enic_dev_packet_filter(enic, directed,
                        multicast, broadcast, promisc, allmulti);
        }

        if (!promisc) {
                enic_update_unicast_addr_list(enic);
                if (!allmulti)
                        enic_update_multicast_addr_list(enic);
        }
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct enic *enic = netdev_priv(netdev);
        struct enic_port_profile *pp;
        int err;

        ENIC_PP_BY_INDEX(enic, vf, pp, &err);
        if (err)
                return err;

        if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
                if (vf == PORT_SELF_VF) {
                        memcpy(pp->vf_mac, mac, ETH_ALEN);
                        return 0;
                } else {
                        /* For SR-IOV VFs, set the MAC in hardware */
                        ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
                                vnic_dev_set_mac_addr, mac);
                        return enic_dev_status_to_errno(err);
                }
        } else
                return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
        struct nlattr *port[])
{
        struct enic *enic = netdev_priv(netdev);
        struct enic_port_profile prev_pp;
        struct enic_port_profile *pp;
        int err = 0, restore_pp = 1;

        ENIC_PP_BY_INDEX(enic, vf, pp, &err);
        if (err)
                return err;

        if (!port[IFLA_PORT_REQUEST])
                return -EOPNOTSUPP;

        memcpy(&prev_pp, pp, sizeof(*enic->pp));
        memset(pp, 0, sizeof(*enic->pp));

        pp->set |= ENIC_SET_REQUEST;
        pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

        if (port[IFLA_PORT_PROFILE]) {
                pp->set |= ENIC_SET_NAME;
                memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
                        PORT_PROFILE_MAX);
        }

        if (port[IFLA_PORT_INSTANCE_UUID]) {
                pp->set |= ENIC_SET_INSTANCE;
                memcpy(pp->instance_uuid,
                        nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
        }

        if (port[IFLA_PORT_HOST_UUID]) {
                pp->set |= ENIC_SET_HOST;
                memcpy(pp->host_uuid,
                        nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
        }

        if (vf == PORT_SELF_VF) {
                /* Special case handling: mac came from IFLA_VF_MAC */
                if (!is_zero_ether_addr(prev_pp.vf_mac))
                        memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

                if (is_zero_ether_addr(netdev->dev_addr))
                        eth_hw_addr_random(netdev);
        } else {
                /* SR-IOV VF: get mac from adapter */
                ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
                        vnic_dev_get_mac_addr, pp->mac_addr);
                if (err) {
                        netdev_err(netdev, "Error getting mac for vf %d\n", vf);
                        memcpy(pp, &prev_pp, sizeof(*pp));
                        return enic_dev_status_to_errno(err);
                }
        }

        err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
        if (err) {
                if (restore_pp) {
                        /* Things are still the way they were: Implicit
                         * DISASSOCIATE failed
                         */
                        memcpy(pp, &prev_pp, sizeof(*pp));
                } else {
                        memset(pp, 0, sizeof(*pp));
                        if (vf == PORT_SELF_VF)
                                memset(netdev->dev_addr, 0, ETH_ALEN);
                }
        } else {
                /* Set flag to indicate that the port assoc/disassoc
                 * request has been sent out to fw
                 */
                pp->set |= ENIC_PORT_REQUEST_APPLIED;

                /* If DISASSOCIATE, clean up all assigned/saved macaddresses */
                if (pp->request == PORT_REQUEST_DISASSOCIATE) {
                        memset(pp->mac_addr, 0, ETH_ALEN);
                        if (vf == PORT_SELF_VF)
                                memset(netdev->dev_addr, 0, ETH_ALEN);
                }
        }

        if (vf == PORT_SELF_VF)
                memset(pp->vf_mac, 0, ETH_ALEN);

        return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
        struct sk_buff *skb)
{
        struct enic *enic = netdev_priv(netdev);
        u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
        struct enic_port_profile *pp;
        int err;

        ENIC_PP_BY_INDEX(enic, vf, pp, &err);
        if (err)
                return err;

        if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
                return -ENODATA;

        err = enic_process_get_pp_request(enic, vf, pp->request, &response);
        if (err)
                return err;

        NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request);
        NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
        if (pp->set & ENIC_SET_NAME)
                NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
                        pp->name);
        if (pp->set & ENIC_SET_INSTANCE)
                NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
                        pp->instance_uuid);
        if (pp->set & ENIC_SET_HOST)
                NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
                        pp->host_uuid);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
        struct enic *enic = vnic_dev_priv(rq->vdev);

        if (!buf->os_buf)
                return;

        pci_unmap_single(enic->pdev, buf->dma_addr,
                buf->len, PCI_DMA_FROMDEVICE);
        dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
        struct enic *enic = vnic_dev_priv(rq->vdev);
        struct net_device *netdev = enic->netdev;
        struct sk_buff *skb;
        unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
        unsigned int os_buf_index = 0;
        dma_addr_t dma_addr;

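        /* Allocate a receive buffer large enough for the MTU plus the
         * Ethernet and VLAN headers; netdev_alloc_skb_ip_align() reserves
         * NET_IP_ALIGN bytes so the IP header lands aligned.
         */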
        skb = netdev_alloc_skb_ip_align(netdev, len);
        if (!skb)
                return -ENOMEM;

        dma_addr = pci_map_single(enic->pdev, skb->data,
                len, PCI_DMA_FROMDEVICE);

        enic_queue_rq_desc(rq, skb, os_buf_index,
                dma_addr, len);

        return 0;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque)
{
        struct enic *enic = vnic_dev_priv(rq->vdev);
        struct net_device *netdev = enic->netdev;
        struct sk_buff *skb;

        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
        u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
        u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
        u8 packet_error;
        u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
        u32 rss_hash;

        if (skipped)
                return;

        skb = buf->os_buf;
        prefetch(skb->data - NET_IP_ALIGN);
        pci_unmap_single(enic->pdev, buf->dma_addr,
                buf->len, PCI_DMA_FROMDEVICE);

        cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
                &type, &color, &q_number, &completed_index,
                &ingress_port, &fcoe, &eop, &sop, &rss_type,
                &csum_not_calc, &rss_hash, &bytes_written,
                &packet_error, &vlan_stripped, &vlan_tci, &checksum,
                &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
                &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
                &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
                &fcs_ok);

        if (packet_error) {

                if (!fcs_ok) {
                        if (bytes_written > 0)
                                enic->rq_bad_fcs++;
                        else if (bytes_written == 0)
                                enic->rq_truncated_pkts++;
                }

                dev_kfree_skb_any(skb);

                return;
        }

        if (eop && bytes_written > 0) {

                /* Good receive
                 */

                skb_put(skb, bytes_written);
                skb->protocol = eth_type_trans(skb, netdev);

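                /* If RX checksum offload is enabled and the adapter
                 * computed a checksum, hand it up as CHECKSUM_COMPLETE so
                 * the stack need not recompute L4 checksums in software.
                 */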
                if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
                        skb->csum = htons(checksum);
                        skb->ip_summed = CHECKSUM_COMPLETE;
                }

                skb->dev = netdev;

                if (vlan_stripped)
                        __vlan_hwaccel_put_tag(skb, vlan_tci);

                if (netdev->features & NETIF_F_GRO)
                        napi_gro_receive(&enic->napi[q_number], skb);
                else
                        netif_receive_skb(skb);
        } else {

                /* Buffer overflow
                 */

                dev_kfree_skb_any(skb);
        }
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
        u8 type, u16 q_number, u16 completed_index, void *opaque)
{
        struct enic *enic = vnic_dev_priv(vdev);

        vnic_rq_service(&enic->rq[q_number], cq_desc,
                completed_index, VNIC_RQ_RETURN_DESC,
                enic_rq_indicate_buf, opaque);

        return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
        unsigned int cq_rq = enic_cq_rq(enic, 0);
        unsigned int cq_wq = enic_cq_wq(enic, 0);
        unsigned int intr = enic_legacy_io_intr();
        unsigned int rq_work_to_do = budget;
        unsigned int wq_work_to_do = -1; /* no limit */
        unsigned int work_done, rq_work_done, wq_work_done;
        int err;

        /* Service RQ (first) and WQ
         */

        rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
                rq_work_to_do, enic_rq_service, NULL);

        wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
                wq_work_to_do, enic_wq_service, NULL);

        /* Accumulate intr event credits for this polling
         * cycle.  An intr event is the completion of a
         * WQ or RQ packet.
         */

        work_done = rq_work_done + wq_work_done;

        if (work_done > 0)
                vnic_intr_return_credits(&enic->intr[intr],
                        work_done,
                        0 /* don't unmask intr */,
                        0 /* don't reset intr timer */);

        err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

        /* Buffer allocation failed. Stay in polling
         * mode so we can try to fill the ring again.
         */

        if (err)
                rq_work_done = rq_work_to_do;

        if (rq_work_done < rq_work_to_do) {

                /* Some work done, but not enough to stay in polling,
                 * exit polling
                 */

                napi_complete(napi);
                vnic_intr_unmask(&enic->intr[intr]);
        }

        return rq_work_done;
}

static int enic_poll_msix(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
        unsigned int rq = (napi - &enic->napi[0]);
        unsigned int cq = enic_cq_rq(enic, rq);
        unsigned int intr = enic_msix_rq_intr(enic, rq);
        unsigned int work_to_do = budget;
        unsigned int work_done;
        int err;

        /* Service RQ
         */

        work_done = vnic_cq_service(&enic->cq[cq],
                work_to_do, enic_rq_service, NULL);

        /* Return intr event credits for this polling
         * cycle.  An intr event is the completion of an
         * RQ packet.
         */

        if (work_done > 0)
                vnic_intr_return_credits(&enic->intr[intr],
                        work_done,
                        0 /* don't unmask intr */,
                        0 /* don't reset intr timer */);

        err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

        /* Buffer allocation failed. Stay in polling mode
         * so we can try to fill the ring again.
         */

        if (err)
                work_done = work_to_do;

        if (work_done < work_to_do) {

                /* Some work done, but not enough to stay in polling,
                 * exit polling
                 */

                napi_complete(napi);
                vnic_intr_unmask(&enic->intr[intr]);
        }

        return work_done;
}

static void enic_notify_timer(unsigned long data)
{
        struct enic *enic = (struct enic *)data;

        enic_notify_check(enic);

        mod_timer(&enic->notify_timer,
                round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;
        unsigned int i;

        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                free_irq(enic->pdev->irq, netdev);
                break;
        case VNIC_DEV_INTR_MODE_MSI:
                free_irq(enic->pdev->irq, enic);
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
                for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
                        if (enic->msix[i].requested)
                                free_irq(enic->msix_entry[i].vector,
                                        enic->msix[i].devid);
                break;
        default:
                break;
        }
}

static int enic_request_intr(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;
        unsigned int i, intr;
        int err = 0;

        switch (vnic_dev_get_intr_mode(enic->vdev)) {

        case VNIC_DEV_INTR_MODE_INTX:

                err = request_irq(enic->pdev->irq, enic_isr_legacy,
                        IRQF_SHARED, netdev->name, netdev);
                break;

        case VNIC_DEV_INTR_MODE_MSI:

                err = request_irq(enic->pdev->irq, enic_isr_msi,
                        0, netdev->name, enic);
                break;

        case VNIC_DEV_INTR_MODE_MSIX:

                for (i = 0; i < enic->rq_count; i++) {
                        intr = enic_msix_rq_intr(enic, i);
                        sprintf(enic->msix[intr].devname,
                                "%.11s-rx-%d", netdev->name, i);
                        enic->msix[intr].isr = enic_isr_msix_rq;
                        enic->msix[intr].devid = &enic->napi[i];
                }

                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
                        sprintf(enic->msix[intr].devname,
                                "%.11s-tx-%d", netdev->name, i);
                        enic->msix[intr].isr = enic_isr_msix_wq;
                        enic->msix[intr].devid = enic;
                }

                intr = enic_msix_err_intr(enic);
                sprintf(enic->msix[intr].devname,
                        "%.11s-err", netdev->name);
                enic->msix[intr].isr = enic_isr_msix_err;
                enic->msix[intr].devid = enic;

                intr = enic_msix_notify_intr(enic);
                sprintf(enic->msix[intr].devname,
                        "%.11s-notify", netdev->name);
                enic->msix[intr].isr = enic_isr_msix_notify;
                enic->msix[intr].devid = enic;

                for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
                        enic->msix[i].requested = 0;

                for (i = 0; i < enic->intr_count; i++) {
                        err = request_irq(enic->msix_entry[i].vector,
                                enic->msix[i].isr, 0,
                                enic->msix[i].devname,
                                enic->msix[i].devid);
                        if (err) {
                                enic_free_intr(enic);
                                break;
                        }
                        enic->msix[i].requested = 1;
                }

                break;

        default:
                break;
        }

        return err;
}
1551
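    /* Wait for any in-flight handlers on this device's IRQ(s) to finish.
     * Callers such as enic_stop() mask the interrupts first, so after
     * this returns no further handler invocations are expected.
     */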
1552static void enic_synchronize_irqs(struct enic *enic)
1553{
1554        unsigned int i;
1555
1556        switch (vnic_dev_get_intr_mode(enic->vdev)) {
1557        case VNIC_DEV_INTR_MODE_INTX:
1558        case VNIC_DEV_INTR_MODE_MSI:
1559                synchronize_irq(enic->pdev->irq);
1560                break;
1561        case VNIC_DEV_INTR_MODE_MSIX:
1562                for (i = 0; i < enic->intr_count; i++)
1563                        synchronize_irq(enic->msix_entry[i].vector);
1564                break;
1565        default:
1566                break;
1567        }
1568}
1569
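    /* Tell the firmware which interrupt to use for async notifications
     * (link changes, errors, MTU updates).  INTx and MSI-X each have a
     * dedicated notify interrupt; for the remaining modes -1 disables
     * the interrupt and the notify timer above polls instead.
     */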
1570static int enic_dev_notify_set(struct enic *enic)
1571{
1572        int err;
1573
1574        spin_lock(&enic->devcmd_lock);
1575        switch (vnic_dev_get_intr_mode(enic->vdev)) {
1576        case VNIC_DEV_INTR_MODE_INTX:
1577                err = vnic_dev_notify_set(enic->vdev,
1578                        enic_legacy_notify_intr());
1579                break;
1580        case VNIC_DEV_INTR_MODE_MSIX:
1581                err = vnic_dev_notify_set(enic->vdev,
1582                        enic_msix_notify_intr(enic));
1583                break;
1584        default:
1585                err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1586                break;
1587        }
1588        spin_unlock(&enic->devcmd_lock);
1589
1590        return err;
1591}
1592
1593static void enic_notify_timer_start(struct enic *enic)
1594{
1595        switch (vnic_dev_get_intr_mode(enic->vdev)) {
1596        case VNIC_DEV_INTR_MODE_MSI:
1597                mod_timer(&enic->notify_timer, jiffies);
1598                break;
1599        default:
1600                /* INTx and MSI-X use an interrupt for notification */
1601                break;
1602        }
1603}
1604
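    /* Bring the interface up.  The ordering here appears deliberate:
     * IRQs and the notify buffer are set up first, the receive rings
     * are filled before being enabled, and the interrupts are unmasked
     * only after NAPI is enabled, so the first interrupt never sees an
     * unprepared ring.  Roughly:
     *
     *   request IRQs -> set notify buffer -> fill + enable RQs/WQs
     *   -> station addr + rx mode -> enable NAPI -> enable device
     *   -> unmask interrupts -> start notify timer
     */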
1605/* rtnl lock is held, process context */
1606static int enic_open(struct net_device *netdev)
1607{
1608        struct enic *enic = netdev_priv(netdev);
1609        unsigned int i;
1610        int err;
1611
1612        err = enic_request_intr(enic);
1613        if (err) {
1614                netdev_err(netdev, "Unable to request irq.\n");
1615                return err;
1616        }
1617
1618        err = enic_dev_notify_set(enic);
1619        if (err) {
1620                netdev_err(netdev,
1621                        "Failed to alloc notify buffer, aborting.\n");
1622                goto err_out_free_intr;
1623        }
1624
1625        for (i = 0; i < enic->rq_count; i++) {
1626                vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
1627                /* Need at least one buffer on ring to get going */
1628                if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1629                        netdev_err(netdev, "Unable to alloc receive buffers\n");
1630                        err = -ENOMEM;
1631                        goto err_out_notify_unset;
1632                }
1633        }
1634
1635        for (i = 0; i < enic->wq_count; i++)
1636                vnic_wq_enable(&enic->wq[i]);
1637        for (i = 0; i < enic->rq_count; i++)
1638                vnic_rq_enable(&enic->rq[i]);
1639
1640        if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
1641                enic_dev_add_station_addr(enic);
1642
1643        enic_set_rx_mode(netdev);
1644
1645        netif_wake_queue(netdev);
1646
1647        for (i = 0; i < enic->rq_count; i++)
1648                napi_enable(&enic->napi[i]);
1649
1650        enic_dev_enable(enic);
1651
1652        for (i = 0; i < enic->intr_count; i++)
1653                vnic_intr_unmask(&enic->intr[i]);
1654
1655        enic_notify_timer_start(enic);
1656
1657        return 0;
1658
1659err_out_notify_unset:
1660        enic_dev_notify_unset(enic);
1661err_out_free_intr:
1662        enic_free_intr(enic);
1663
1664        return err;
1665}
1666
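    /* Tear down in roughly the reverse order of enic_open(): mask and
     * synchronize interrupts, stop the notify timer, disable the device
     * and NAPI, stop TX, then disable the queues.  vnic_wq_disable()
     * and vnic_rq_disable() wait for the hardware to quiesce and may
     * fail, in which case the error is returned and cleanup stops.
     */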
1667/* rtnl lock is held, process context */
1668static int enic_stop(struct net_device *netdev)
1669{
1670        struct enic *enic = netdev_priv(netdev);
1671        unsigned int i;
1672        int err;
1673
1674        for (i = 0; i < enic->intr_count; i++) {
1675                vnic_intr_mask(&enic->intr[i]);
1676                (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
1677        }
1678
1679        enic_synchronize_irqs(enic);
1680
1681        del_timer_sync(&enic->notify_timer);
1682
1683        enic_dev_disable(enic);
1684
1685        for (i = 0; i < enic->rq_count; i++)
1686                napi_disable(&enic->napi[i]);
1687
1688        netif_carrier_off(netdev);
1689        netif_tx_disable(netdev);
1690
1691        if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
1692                enic_dev_del_station_addr(enic);
1693
1694        for (i = 0; i < enic->wq_count; i++) {
1695                err = vnic_wq_disable(&enic->wq[i]);
1696                if (err)
1697                        return err;
1698        }
1699        for (i = 0; i < enic->rq_count; i++) {
1700                err = vnic_rq_disable(&enic->rq[i]);
1701                if (err)
1702                        return err;
1703        }
1704
1705        enic_dev_notify_unset(enic);
1706        enic_free_intr(enic);
1707
1708        for (i = 0; i < enic->wq_count; i++)
1709                vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1710        for (i = 0; i < enic->rq_count; i++)
1711                vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1712        for (i = 0; i < enic->cq_count; i++)
1713                vnic_cq_clean(&enic->cq[i]);
1714        for (i = 0; i < enic->intr_count; i++)
1715                vnic_intr_clean(&enic->intr[i]);
1716
1717        return 0;
1718}
1719
1720static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1721{
1722        struct enic *enic = netdev_priv(netdev);
1723        int running = netif_running(netdev);
1724
1725        if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
1726                return -EINVAL;
1727
1728        if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
1729                return -EOPNOTSUPP;
1730
1731        if (running)
1732                enic_stop(netdev);
1733
1734        netdev->mtu = new_mtu;
1735
1736        if (netdev->mtu > enic->port_mtu)
1737                netdev_warn(netdev,
1738                        "interface MTU (%d) set higher than port MTU (%d)\n",
1739                        netdev->mtu, enic->port_mtu);
1740
1741        if (running)
1742                enic_open(netdev);
1743
1744        return 0;
1745}
1746
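    /* Apply an MTU change reported by the firmware (vnic_dev_mtu()).
     * Only rq[0]/cq[0]/intr[0] are touched, so this path assumes the
     * single-RQ (non-RSS) configuration: the RQ is quiesced, drained,
     * refilled with new_mtu-sized buffers and restarted, all under the
     * rtnl lock.
     */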
1747static void enic_change_mtu_work(struct work_struct *work)
1748{
1749        struct enic *enic = container_of(work, struct enic, change_mtu_work);
1750        struct net_device *netdev = enic->netdev;
1751        int new_mtu = vnic_dev_mtu(enic->vdev);
1752        int err;
1753        unsigned int i;
1754
1755        new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
1756
1757        rtnl_lock();
1758
1759        /* Stop RQ */
1760        del_timer_sync(&enic->notify_timer);
1761
1762        for (i = 0; i < enic->rq_count; i++)
1763                napi_disable(&enic->napi[i]);
1764
1765        vnic_intr_mask(&enic->intr[0]);
1766        enic_synchronize_irqs(enic);
1767        err = vnic_rq_disable(&enic->rq[0]);
1768        if (err) {
1769                netdev_err(netdev, "Unable to disable RQ.\n");
                    rtnl_unlock();
1770                return;
1771        }
1772        vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
1773        vnic_cq_clean(&enic->cq[0]);
1774        vnic_intr_clean(&enic->intr[0]);
1775
1776        /* Fill RQ with new_mtu-sized buffers */
1777        netdev->mtu = new_mtu;
1778        vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1779        /* Need at least one buffer on ring to get going */
1780        if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
1781                netdev_err(netdev, "Unable to alloc receive buffers.\n");
                    rtnl_unlock();
1782                return;
1783        }
1784
1785        /* Start RQ */
1786        vnic_rq_enable(&enic->rq[0]);
1787        napi_enable(&enic->napi[0]);
1788        vnic_intr_unmask(&enic->intr[0]);
1789        enic_notify_timer_start(enic);
1790
1791        rtnl_unlock();
1792
1793        netdev_info(netdev, "interface MTU set to %d\n", netdev->mtu);
1794}
1795
1796#ifdef CONFIG_NET_POLL_CONTROLLER
1797static void enic_poll_controller(struct net_device *netdev)
1798{
1799        struct enic *enic = netdev_priv(netdev);
1800        struct vnic_dev *vdev = enic->vdev;
1801        unsigned int i, intr;
1802
1803        switch (vnic_dev_get_intr_mode(vdev)) {
1804        case VNIC_DEV_INTR_MODE_MSIX:
1805                for (i = 0; i < enic->rq_count; i++) {
1806                        intr = enic_msix_rq_intr(enic, i);
1807                        enic_isr_msix_rq(enic->msix_entry[intr].vector,
1808                                &enic->napi[i]);
1809                }
1810
1811                for (i = 0; i < enic->wq_count; i++) {
1812                        intr = enic_msix_wq_intr(enic, i);
1813                        enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
1814                }
1815
1816                break;
1817        case VNIC_DEV_INTR_MODE_MSI:
1818                enic_isr_msi(enic->pdev->irq, enic);
1819                break;
1820        case VNIC_DEV_INTR_MODE_INTX:
1821                enic_isr_legacy(enic->pdev->irq, netdev);
1822                break;
1823        default:
1824                break;
1825        }
1826}
1827#endif
1828
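    /* Generic poll-for-completion helper for device commands: kick the
     * command off with start(), then poll finished() every 100ms for up
     * to 2 seconds.  Typical usage is the open/reset wrappers below,
     * e.g.:
     *
     *   err = enic_dev_wait(enic->vdev, vnic_dev_open,
     *           vnic_dev_open_done, 0);
     */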
1829static int enic_dev_wait(struct vnic_dev *vdev,
1830        int (*start)(struct vnic_dev *, int),
1831        int (*finished)(struct vnic_dev *, int *),
1832        int arg)
1833{
1834        unsigned long time;
1835        int done;
1836        int err;
1837
1838        BUG_ON(in_interrupt());
1839
1840        err = start(vdev, arg);
1841        if (err)
1842                return err;
1843
1844        /* Wait for func to complete; 2 seconds max */
1846
1847        time = jiffies + (HZ * 2);
1848        do {
1849
1850                err = finished(vdev, &done);
1851                if (err)
1852                        return err;
1853
1854                if (done)
1855                        return 0;
1856
1857                schedule_timeout_uninterruptible(HZ / 10);
1858
1859        } while (time_after(time, jiffies));
1860
1861        return -ETIMEDOUT;
1862}
1863
1864static int enic_dev_open(struct enic *enic)
1865{
1866        int err;
1867
1868        err = enic_dev_wait(enic->vdev, vnic_dev_open,
1869                vnic_dev_open_done, 0);
1870        if (err)
1871                dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
1872                        err);
1873
1874        return err;
1875}
1876
1877static int enic_dev_hang_reset(struct enic *enic)
1878{
1879        int err;
1880
1881        err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
1882                vnic_dev_hang_reset_done, 0);
1883        if (err)
1884                netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
1885                        err);
1886
1887        return err;
1888}
1889
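    /* Program a fixed default RSS hash key.  The four 10-byte words
     * below are ASCII: "UCSawesome", "PALOunique", "LINUXrocks" and
     * "ENICiscool".  The key is staged in a DMA-coherent buffer,
     * presumably because the devcmd takes a bus address rather than a
     * kernel pointer.
     */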
1890static int enic_set_rsskey(struct enic *enic)
1891{
1892        dma_addr_t rss_key_buf_pa;
1893        union vnic_rss_key *rss_key_buf_va = NULL;
1894        union vnic_rss_key rss_key = {
1895                .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
1896                .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
1897                .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
1898                .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
1899        };
1900        int err;
1901
1902        rss_key_buf_va = pci_alloc_consistent(enic->pdev,
1903                sizeof(union vnic_rss_key), &rss_key_buf_pa);
1904        if (!rss_key_buf_va)
1905                return -ENOMEM;
1906
1907        memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
1908
1909        spin_lock(&enic->devcmd_lock);
1910        err = enic_set_rss_key(enic,
1911                rss_key_buf_pa,
1912                sizeof(union vnic_rss_key));
1913        spin_unlock(&enic->devcmd_lock);
1914
1915        pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
1916                rss_key_buf_va, rss_key_buf_pa);
1917
1918        return err;
1919}
1920
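    /* Build the RSS indirection table.  With rss_hash_bits == 7 there
     * are 128 entries, packed four per word (entry i is byte i%4 of
     * word i/4), and the hash buckets are simply round-robined across
     * the receive queues:
     *
     *   entry: 0  1  2  3  4  5 ...   (e.g. with rq_count == 4)
     *   RQ:    0  1  2  3  0  1 ...
     */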
1921static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
1922{
1923        dma_addr_t rss_cpu_buf_pa;
1924        union vnic_rss_cpu *rss_cpu_buf_va = NULL;
1925        unsigned int i;
1926        int err;
1927
1928        rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
1929                sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
1930        if (!rss_cpu_buf_va)
1931                return -ENOMEM;
1932
1933        for (i = 0; i < (1 << rss_hash_bits); i++)
1934                (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
1935
1936        spin_lock(&enic->devcmd_lock);
1937        err = enic_set_rss_cpu(enic,
1938                rss_cpu_buf_pa,
1939                sizeof(union vnic_rss_cpu));
1940        spin_unlock(&enic->devcmd_lock);
1941
1942        pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
1943                rss_cpu_buf_va, rss_cpu_buf_pa);
1944
1945        return err;
1946}
1947
1948static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
1949        u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
1950{
1951        const u8 tso_ipid_split_en = 0;
1952        const u8 ig_vlan_strip_en = 1;
1953        int err;
1954
1955        /* Enable VLAN tag stripping. */
1957
1958        spin_lock(&enic->devcmd_lock);
1959        err = enic_set_nic_cfg(enic,
1960                rss_default_cpu, rss_hash_type,
1961                rss_hash_bits, rss_base_cpu,
1962                rss_enable, tso_ipid_split_en,
1963                ig_vlan_strip_en);
1964        spin_unlock(&enic->devcmd_lock);
1965
1966        return err;
1967}
1968
1969static int enic_set_rss_nic_cfg(struct enic *enic)
1970{
1971        struct device *dev = enic_get_dev(enic);
1972        const u8 rss_default_cpu = 0;
1973        const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
1974                NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
1975                NIC_CFG_RSS_HASH_TYPE_IPV6 |
1976                NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
1977        const u8 rss_hash_bits = 7;
1978        const u8 rss_base_cpu = 0;
1979        u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
1980
1981        if (rss_enable) {
1982                if (!enic_set_rsskey(enic)) {
1983                        if (enic_set_rsscpu(enic, rss_hash_bits)) {
1984                                rss_enable = 0;
1985                                dev_warn(dev, "RSS disabled, "
1986                                        "failed to set RSS cpu indirection table.\n");
1987                        }
1988                } else {
1989                        rss_enable = 0;
1990                        dev_warn(dev, "RSS disabled, failed to set RSS key.\n");
1991                }
1992        }
1993
1994        return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
1995                rss_hash_bits, rss_base_cpu, rss_enable);
1996}
1997
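    /* Recover from a device hang (scheduled via enic->reset from the
     * error/notify handling elsewhere in this file).  The device is
     * notified, stopped, hang-reset and then rebuilt from scratch:
     * address lists, vNIC resources, RSS config and VLAN rewrite mode,
     * before the interface is reopened.
     */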
1998static void enic_reset(struct work_struct *work)
1999{
2000        struct enic *enic = container_of(work, struct enic, reset);
2001
2002        if (!netif_running(enic->netdev))
2003                return;
2004
2005        rtnl_lock();
2006
2007        enic_dev_hang_notify(enic);
2008        enic_stop(enic->netdev);
2009        enic_dev_hang_reset(enic);
2010        enic_reset_addr_lists(enic);
2011        enic_init_vnic_resources(enic);
2012        enic_set_rss_nic_cfg(enic);
2013        enic_dev_set_ig_vlan_rewrite_mode(enic);
2014        enic_open(enic->netdev);
2015
2016        rtnl_unlock();
2017}
2018
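    /* Pick an interrupt mode.  In MSI-X mode, for n RQs and m WQs, the
     * vectors are laid out by the enic_msix_*_intr() helpers as:
     *
     *   0 .. n-1      RQ completions
     *   n .. n+m-1    WQ completions
     *   n+m           WQ/RQ errors
     *   n+m+1         notifications
     *
     * If n+m+2 vectors cannot be allocated the driver retries with a
     * single RQ, then falls back to MSI and finally to legacy INTx.
     */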
2019static int enic_set_intr_mode(struct enic *enic)
2020{
2021        unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2022        unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
2023        unsigned int i;
2024
2025        /* Set interrupt mode (INTx, MSI, MSI-X) depending
2026         * on system capabilities.
2027         *
2028         * Try MSI-X first
2029         *
2030         * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
2031         * (the second to last INTR is used for WQ/RQ errors)
2032         * (the last INTR is used for notifications)
2033         */
2034
2035        BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
2036        for (i = 0; i < n + m + 2; i++)
2037                enic->msix_entry[i].entry = i;
2038
2039        /* Use multiple RQs if RSS is enabled
2040         */
2041
2042        if (ENIC_SETTING(enic, RSS) &&
2043            enic->config.intr_mode < 1 &&
2044            enic->rq_count >= n &&
2045            enic->wq_count >= m &&
2046            enic->cq_count >= n + m &&
2047            enic->intr_count >= n + m + 2) {
2048
2049                if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
2050
2051                        enic->rq_count = n;
2052                        enic->wq_count = m;
2053                        enic->cq_count = n + m;
2054                        enic->intr_count = n + m + 2;
2055
2056                        vnic_dev_set_intr_mode(enic->vdev,
2057                                VNIC_DEV_INTR_MODE_MSIX);
2058
2059                        return 0;
2060                }
2061        }
2062
2063        if (enic->config.intr_mode < 1 &&
2064            enic->rq_count >= 1 &&
2065            enic->wq_count >= m &&
2066            enic->cq_count >= 1 + m &&
2067            enic->intr_count >= 1 + m + 2) {
2068                if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
2069
2070                        enic->rq_count = 1;
2071                        enic->wq_count = m;
2072                        enic->cq_count = 1 + m;
2073                        enic->intr_count = 1 + m + 2;
2074
2075                        vnic_dev_set_intr_mode(enic->vdev,
2076                                VNIC_DEV_INTR_MODE_MSIX);
2077
2078                        return 0;
2079                }
2080        }
2081
2082        /* Next try MSI
2083         *
2084         * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
2085         */
2086
2087        if (enic->config.intr_mode < 2 &&
2088            enic->rq_count >= 1 &&
2089            enic->wq_count >= 1 &&
2090            enic->cq_count >= 2 &&
2091            enic->intr_count >= 1 &&
2092            !pci_enable_msi(enic->pdev)) {
2093
2094                enic->rq_count = 1;
2095                enic->wq_count = 1;
2096                enic->cq_count = 2;
2097                enic->intr_count = 1;
2098
2099                vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
2100
2101                return 0;
2102        }
2103
2104        /* Next try INTx
2105         *
2106         * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
2107         * (the first INTR is used for WQ/RQ)
2108         * (the second INTR is used for WQ/RQ errors)
2109         * (the last INTR is used for notifications)
2110         */
2111
2112        if (enic->config.intr_mode < 3 &&
2113            enic->rq_count >= 1 &&
2114            enic->wq_count >= 1 &&
2115            enic->cq_count >= 2 &&
2116            enic->intr_count >= 3) {
2117
2118                enic->rq_count = 1;
2119                enic->wq_count = 1;
2120                enic->cq_count = 2;
2121                enic->intr_count = 3;
2122
2123                vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
2124
2125                return 0;
2126        }
2127
2128        vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2129
2130        return -EINVAL;
2131}
2132
2133static void enic_clear_intr_mode(struct enic *enic)
2134{
2135        switch (vnic_dev_get_intr_mode(enic->vdev)) {
2136        case VNIC_DEV_INTR_MODE_MSIX:
2137                pci_disable_msix(enic->pdev);
2138                break;
2139        case VNIC_DEV_INTR_MODE_MSI:
2140                pci_disable_msi(enic->pdev);
2141                break;
2142        default:
2143                break;
2144        }
2145
2146        vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2147}
2148
2149static const struct net_device_ops enic_netdev_dynamic_ops = {
2150        .ndo_open               = enic_open,
2151        .ndo_stop               = enic_stop,
2152        .ndo_start_xmit         = enic_hard_start_xmit,
2153        .ndo_get_stats64        = enic_get_stats,
2154        .ndo_validate_addr      = eth_validate_addr,
2155        .ndo_set_rx_mode        = enic_set_rx_mode,
2156        .ndo_set_mac_address    = enic_set_mac_address_dynamic,
2157        .ndo_change_mtu         = enic_change_mtu,
2158        .ndo_vlan_rx_add_vid    = enic_vlan_rx_add_vid,
2159        .ndo_vlan_rx_kill_vid   = enic_vlan_rx_kill_vid,
2160        .ndo_tx_timeout         = enic_tx_timeout,
2161        .ndo_set_vf_port        = enic_set_vf_port,
2162        .ndo_get_vf_port        = enic_get_vf_port,
2163        .ndo_set_vf_mac         = enic_set_vf_mac,
2164#ifdef CONFIG_NET_POLL_CONTROLLER
2165        .ndo_poll_controller    = enic_poll_controller,
2166#endif
2167};
2168
2169static const struct net_device_ops enic_netdev_ops = {
2170        .ndo_open               = enic_open,
2171        .ndo_stop               = enic_stop,
2172        .ndo_start_xmit         = enic_hard_start_xmit,
2173        .ndo_get_stats64        = enic_get_stats,
2174        .ndo_validate_addr      = eth_validate_addr,
2175        .ndo_set_mac_address    = enic_set_mac_address,
2176        .ndo_set_rx_mode        = enic_set_rx_mode,
2177        .ndo_change_mtu         = enic_change_mtu,
2178        .ndo_vlan_rx_add_vid    = enic_vlan_rx_add_vid,
2179        .ndo_vlan_rx_kill_vid   = enic_vlan_rx_kill_vid,
2180        .ndo_tx_timeout         = enic_tx_timeout,
2181        .ndo_set_vf_port        = enic_set_vf_port,
2182        .ndo_get_vf_port        = enic_get_vf_port,
2183        .ndo_set_vf_mac         = enic_set_vf_mac,
2184#ifdef CONFIG_NET_POLL_CONTROLLER
2185        .ndo_poll_controller    = enic_poll_controller,
2186#endif
2187};
2188
2189static void enic_dev_deinit(struct enic *enic)
2190{
2191        unsigned int i;
2192
2193        for (i = 0; i < enic->rq_count; i++)
2194                netif_napi_del(&enic->napi[i]);
2195
2196        enic_free_vnic_resources(enic);
2197        enic_clear_intr_mode(enic);
2198}
2199
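    /* One-time device initialization: read the coalesce timer info and
     * vNIC config, discover resource counts, pick an interrupt mode,
     * allocate and program the queue resources, configure RSS, and
     * register one NAPI context per RQ (MSI-X) or a single context
     * otherwise.
     */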
2200static int enic_dev_init(struct enic *enic)
2201{
2202        struct device *dev = enic_get_dev(enic);
2203        struct net_device *netdev = enic->netdev;
2204        unsigned int i;
2205        int err;
2206
2207        /* Get interrupt coalesce timer info */
2208        err = enic_dev_intr_coal_timer_info(enic);
2209        if (err) {
2210                dev_warn(dev, "Using default conversion factor for "
2211                        "interrupt coalesce timer\n");
2212                vnic_dev_intr_coal_timer_info_default(enic->vdev);
2213        }
2214
2215        /* Get vNIC configuration
2216         */
2217
2218        err = enic_get_vnic_config(enic);
2219        if (err) {
2220                dev_err(dev, "Get vNIC configuration failed, aborting\n");
2221                return err;
2222        }
2223
2224        /* Get available resource counts
2225         */
2226
2227        enic_get_res_counts(enic);
2228
2229        /* Set interrupt mode based on resource counts and system
2230         * capabilities
2231         */
2232
2233        err = enic_set_intr_mode(enic);
2234        if (err) {
2235                dev_err(dev, "Failed to set intr mode based on resource "
2236                        "counts and system capabilities, aborting\n");
2237                return err;
2238        }
2239
2240        /* Allocate and configure vNIC resources
2241         */
2242
2243        err = enic_alloc_vnic_resources(enic);
2244        if (err) {
2245                dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
2246                goto err_out_free_vnic_resources;
2247        }
2248
2249        enic_init_vnic_resources(enic);
2250
2251        err = enic_set_rss_nic_cfg(enic);
2252        if (err) {
2253                dev_err(dev, "Failed to config nic, aborting\n");
2254                goto err_out_free_vnic_resources;
2255        }
2256
2257        switch (vnic_dev_get_intr_mode(enic->vdev)) {
2258        default:
2259                netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
2260                break;
2261        case VNIC_DEV_INTR_MODE_MSIX:
2262                for (i = 0; i < enic->rq_count; i++)
2263                        netif_napi_add(netdev, &enic->napi[i],
2264                                enic_poll_msix, 64);
2265                break;
2266        }
2267
2268        return 0;
2269
2270err_out_free_vnic_resources:
2271        enic_clear_intr_mode(enic);
2272        enic_free_vnic_resources(enic);
2273
2274        return err;
2275}
2276
2277static void enic_iounmap(struct enic *enic)
2278{
2279        unsigned int i;
2280
2281        for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2282                if (enic->bar[i].vaddr)
2283                        iounmap(enic->bar[i].vaddr);
2284}
2285
2286static int __devinit enic_probe(struct pci_dev *pdev,
2287        const struct pci_device_id *ent)
2288{
2289        struct device *dev = &pdev->dev;
2290        struct net_device *netdev;
2291        struct enic *enic;
2292        int using_dac = 0;
2293        unsigned int i;
2294        int err;
2295#ifdef CONFIG_PCI_IOV
2296        int pos = 0;
2297#endif
2298        int num_pps = 1;
2299
2300        /* Allocate net device structure and initialize.  Private
2301         * instance data is initialized to zero.
2302         */
2303
2304        netdev = alloc_etherdev(sizeof(struct enic));
2305        if (!netdev)
2306                return -ENOMEM;
2307
2308        pci_set_drvdata(pdev, netdev);
2309
2310        SET_NETDEV_DEV(netdev, &pdev->dev);
2311
2312        enic = netdev_priv(netdev);
2313        enic->netdev = netdev;
2314        enic->pdev = pdev;
2315
2316        /* Setup PCI resources
2317         */
2318
2319        err = pci_enable_device_mem(pdev);
2320        if (err) {
2321                dev_err(dev, "Cannot enable PCI device, aborting\n");
2322                goto err_out_free_netdev;
2323        }
2324
2325        err = pci_request_regions(pdev, DRV_NAME);
2326        if (err) {
2327                dev_err(dev, "Cannot request PCI regions, aborting\n");
2328                goto err_out_disable_device;
2329        }
2330
2331        pci_set_master(pdev);
2332
2333        /* Query PCI controller on system for DMA addressing
2334         * limitation for the device.  Try 40-bit first, and
2335         * fall back to 32-bit.
2336         */
2337
2338        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2339        if (err) {
2340                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2341                if (err) {
2342                        dev_err(dev, "No usable DMA configuration, aborting\n");
2343                        goto err_out_release_regions;
2344                }
2345                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2346                if (err) {
2347                        dev_err(dev, "Unable to obtain %u-bit DMA "
2348                                "for consistent allocations, aborting\n", 32);
2349                        goto err_out_release_regions;
2350                }
2351        } else {
2352                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2353                if (err) {
2354                        dev_err(dev, "Unable to obtain %u-bit DMA "
2355                                "for consistent allocations, aborting\n", 40);
2356                        goto err_out_release_regions;
2357                }
2358                using_dac = 1;
2359        }
2360
2361        /* Map vNIC resources from BAR0-5
2362         */
2363
2364        for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2365                if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
2366                        continue;
2367                enic->bar[i].len = pci_resource_len(pdev, i);
2368                enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2369                if (!enic->bar[i].vaddr) {
2370                        dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
2371                        err = -ENODEV;
2372                        goto err_out_iounmap;
2373                }
2374                enic->bar[i].bus_addr = pci_resource_start(pdev, i);
2375        }
2376
2377        /* Register vNIC device
2378         */
2379
2380        enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2381                ARRAY_SIZE(enic->bar));
2382        if (!enic->vdev) {
2383                dev_err(dev, "vNIC registration failed, aborting\n");
2384                err = -ENODEV;
2385                goto err_out_iounmap;
2386        }
2387
2388#ifdef CONFIG_PCI_IOV
2389        /* Get the number of SR-IOV VFs (subvnics) */
2390        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
2391        if (pos) {
2392                pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
2393                        &enic->num_vfs);
2394                if (enic->num_vfs) {
2395                        err = pci_enable_sriov(pdev, enic->num_vfs);
2396                        if (err) {
2397                                dev_err(dev, "SRIOV enable failed, aborting."
2398                                        " pci_enable_sriov() returned %d\n",
2399                                        err);
2400                                goto err_out_vnic_unregister;
2401                        }
2402                        enic->priv_flags |= ENIC_SRIOV_ENABLED;
2403                        num_pps = enic->num_vfs;
2404                }
2405        }
2406#endif
2407
2408        /* Allocate structure for port profiles */
2409        enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
2410        if (!enic->pp) {
2411                err = -ENOMEM;
2412                goto err_out_disable_sriov_pp;
2413        }
2414
2415        /* Issue device open to get device in known state
2416         */
2417
2418        err = enic_dev_open(enic);
2419        if (err) {
2420                dev_err(dev, "vNIC dev open failed, aborting\n");
2421                goto err_out_disable_sriov;
2422        }
2423
2424        /* Setup devcmd lock
2425         */
2426
2427        spin_lock_init(&enic->devcmd_lock);
2428
2429        /* Set ingress vlan rewrite mode before vnic initialization
2430         */
2432
2433        err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2434        if (err) {
2435                dev_err(dev,
2436                        "Failed to set ingress vlan rewrite mode, aborting.\n");
2437                goto err_out_dev_close;
2438        }
2439
2440        /* Issue device init to initialize the vnic-to-switch link.
2441         * We'll start with carrier off and wait for link UP
2442         * notification later to turn on carrier.  We don't need
2443         * to wait here for the vnic-to-switch link initialization
2444         * to complete; link UP notification is the indication that
2445         * the process is complete.
2446         */
2447
2448        netif_carrier_off(netdev);
2449
2450        /* Do not call dev_init for a dynamic vnic.
2451         * For a dynamic vnic, init_prov_info will be
2452         * called later by an upper layer.
2453         */
2454
2455        if (!enic_is_dynamic(enic)) {
2456                err = vnic_dev_init(enic->vdev, 0);
2457                if (err) {
2458                        dev_err(dev, "vNIC dev init failed, aborting\n");
2459                        goto err_out_dev_close;
2460                }
2461        }
2462
2463        err = enic_dev_init(enic);
2464        if (err) {
2465                dev_err(dev, "Device initialization failed, aborting\n");
2466                goto err_out_dev_close;
2467        }
2468
2469        /* Setup notification timer, HW reset task, and wq locks
2470         */
2471
2472        init_timer(&enic->notify_timer);
2473        enic->notify_timer.function = enic_notify_timer;
2474        enic->notify_timer.data = (unsigned long)enic;
2475
2476        INIT_WORK(&enic->reset, enic_reset);
2477        INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
2478
2479        for (i = 0; i < enic->wq_count; i++)
2480                spin_lock_init(&enic->wq_lock[i]);
2481
2482        /* Register net device
2483         */
2484
2485        enic->port_mtu = enic->config.mtu;
2486        (void)enic_change_mtu(netdev, enic->port_mtu);
2487
2488        err = enic_set_mac_addr(netdev, enic->mac_addr);
2489        if (err) {
2490                dev_err(dev, "Invalid MAC address, aborting\n");
2491                goto err_out_dev_deinit;
2492        }
2493
2494        enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2495        enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2496
2497        if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
2498                netdev->netdev_ops = &enic_netdev_dynamic_ops;
2499        else
2500                netdev->netdev_ops = &enic_netdev_ops;
2501
2502        netdev->watchdog_timeo = 2 * HZ;
2503        netdev->ethtool_ops = &enic_ethtool_ops;
2504
2505        netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2506        if (ENIC_SETTING(enic, LOOP)) {
2507                netdev->features &= ~NETIF_F_HW_VLAN_TX;
2508                enic->loop_enable = 1;
2509                enic->loop_tag = enic->config.loop_tag;
2510                dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2511        }
2512        if (ENIC_SETTING(enic, TXCSUM))
2513                netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2514        if (ENIC_SETTING(enic, TSO))
2515                netdev->hw_features |= NETIF_F_TSO |
2516                        NETIF_F_TSO6 | NETIF_F_TSO_ECN;
2517        if (ENIC_SETTING(enic, RXCSUM))
2518                netdev->hw_features |= NETIF_F_RXCSUM;
2519
2520        netdev->features |= netdev->hw_features;
2521
2522        if (using_dac)
2523                netdev->features |= NETIF_F_HIGHDMA;
2524
2525        netdev->priv_flags |= IFF_UNICAST_FLT;
2526
2527        err = register_netdev(netdev);
2528        if (err) {
2529                dev_err(dev, "Cannot register net device, aborting\n");
2530                goto err_out_dev_deinit;
2531        }
2532
2533        return 0;
2534
2535err_out_dev_deinit:
2536        enic_dev_deinit(enic);
2537err_out_dev_close:
2538        vnic_dev_close(enic->vdev);
2539err_out_disable_sriov:
2540        kfree(enic->pp);
2541err_out_disable_sriov_pp:
2542#ifdef CONFIG_PCI_IOV
2543        if (enic_sriov_enabled(enic)) {
2544                pci_disable_sriov(pdev);
2545                enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2546        }
2547err_out_vnic_unregister:
2548#endif
2549        vnic_dev_unregister(enic->vdev);
2550err_out_iounmap:
2551        enic_iounmap(enic);
2552err_out_release_regions:
2553        pci_release_regions(pdev);
2554err_out_disable_device:
2555        pci_disable_device(pdev);
2556err_out_free_netdev:
2557        pci_set_drvdata(pdev, NULL);
2558        free_netdev(netdev);
2559
2560        return err;
2561}
2562
2563static void __devexit enic_remove(struct pci_dev *pdev)
2564{
2565        struct net_device *netdev = pci_get_drvdata(pdev);
2566
2567        if (netdev) {
2568                struct enic *enic = netdev_priv(netdev);
2569
2570                cancel_work_sync(&enic->reset);
2571                cancel_work_sync(&enic->change_mtu_work);
2572                unregister_netdev(netdev);
2573                enic_dev_deinit(enic);
2574                vnic_dev_close(enic->vdev);
2575#ifdef CONFIG_PCI_IOV
2576                if (enic_sriov_enabled(enic)) {
2577                        pci_disable_sriov(pdev);
2578                        enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2579                }
2580#endif
2581                kfree(enic->pp);
2582                vnic_dev_unregister(enic->vdev);
2583                enic_iounmap(enic);
2584                pci_release_regions(pdev);
2585                pci_disable_device(pdev);
2586                pci_set_drvdata(pdev, NULL);
2587                free_netdev(netdev);
2588        }
2589}
2590
2591static struct pci_driver enic_driver = {
2592        .name = DRV_NAME,
2593        .id_table = enic_id_table,
2594        .probe = enic_probe,
2595        .remove = __devexit_p(enic_remove),
2596};
2597
2598static int __init enic_init_module(void)
2599{
2600        pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
2601
2602        return pci_register_driver(&enic_driver);
2603}
2604
2605static void __exit enic_cleanup_module(void)
2606{
2607        pci_unregister_driver(&enic_driver);
2608}
2609
2610module_init(enic_init_module);
2611module_exit(enic_cleanup_module);
2612