linux/drivers/scsi/fnic/fnic_main.c
   1/*
   2 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
   3 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
   4 *
   5 * This program is free software; you may redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; version 2 of the License.
   8 *
   9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16 * SOFTWARE.
  17 */
  18#include <linux/module.h>
  19#include <linux/mempool.h>
  20#include <linux/string.h>
  21#include <linux/slab.h>
  22#include <linux/errno.h>
  23#include <linux/init.h>
  24#include <linux/pci.h>
  25#include <linux/skbuff.h>
  26#include <linux/interrupt.h>
  27#include <linux/spinlock.h>
  28#include <linux/workqueue.h>
  29#include <linux/if_ether.h>
  30#include <scsi/fc/fc_fip.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_transport.h>
  33#include <scsi/scsi_transport_fc.h>
  34#include <scsi/scsi_tcq.h>
  35#include <scsi/libfc.h>
  36#include <scsi/fc_frame.h>
  37
  38#include "vnic_dev.h"
  39#include "vnic_intr.h"
  40#include "vnic_stats.h"
  41#include "fnic_io.h"
  42#include "fnic_fip.h"
  43#include "fnic.h"
  44
  45#define PCI_DEVICE_ID_CISCO_FNIC        0x0045
  46
  47/* Timer to poll notification area for events. Used for MSI interrupts */
  48#define FNIC_NOTIFY_TIMER_PERIOD        (2 * HZ)
  49
  50static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
  51static struct kmem_cache *fnic_io_req_cache;
  52LIST_HEAD(fnic_list);
  53DEFINE_SPINLOCK(fnic_list_lock);
  54
   55/* Devices supported by the fnic module */
  56static struct pci_device_id fnic_id_table[] = {
  57        { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
  58        { 0, }
  59};
  60
  61MODULE_DESCRIPTION(DRV_DESCRIPTION);
  62MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
  63              "Joseph R. Eykholt <jeykholt@cisco.com>");
  64MODULE_LICENSE("GPL v2");
  65MODULE_VERSION(DRV_VERSION);
  66MODULE_DEVICE_TABLE(pci, fnic_id_table);
  67
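     /*
      * fnic_log_level is a bit mask selecting which fnic logging categories
      * are enabled.  It can be set at load time (e.g.
      * "modprobe fnic fnic_log_level=<mask>") or, because the parameter is
      * created with S_IWUSR, changed at runtime through
      * /sys/module/fnic/parameters/fnic_log_level.
      */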
  68unsigned int fnic_log_level;
  69module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
  70MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
  71
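     /*
      * Number of memory pages reserved for the fnic trace buffer.  The
      * buffer is allocated once at module load via fnic_trace_buf_init(),
      * so writing this parameter afterwards does not resize it.
      */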
  72unsigned int fnic_trace_max_pages = 16;
  73module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
  74MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
  75                                        "for fnic trace buffer");
  76
  77static struct libfc_function_template fnic_transport_template = {
  78        .frame_send = fnic_send,
  79        .lport_set_port_id = fnic_set_port_id,
  80        .fcp_abort_io = fnic_empty_scsi_cleanup,
  81        .fcp_cleanup = fnic_empty_scsi_cleanup,
  82        .exch_mgr_reset = fnic_exch_mgr_reset
  83};
  84
  85static int fnic_slave_alloc(struct scsi_device *sdev)
  86{
  87        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
  88
  89        sdev->tagged_supported = 1;
  90
  91        if (!rport || fc_remote_port_chkready(rport))
  92                return -ENXIO;
  93
  94        scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
  95        return 0;
  96}
  97
  98static struct scsi_host_template fnic_host_template = {
  99        .module = THIS_MODULE,
 100        .name = DRV_NAME,
 101        .queuecommand = fnic_queuecommand,
 102        .eh_abort_handler = fnic_abort_cmd,
 103        .eh_device_reset_handler = fnic_device_reset,
 104        .eh_host_reset_handler = fnic_host_reset,
 105        .slave_alloc = fnic_slave_alloc,
 106        .change_queue_depth = fc_change_queue_depth,
 107        .change_queue_type = fc_change_queue_type,
 108        .this_id = -1,
 109        .cmd_per_lun = 3,
 110        .can_queue = FNIC_MAX_IO_REQ,
 111        .use_clustering = ENABLE_CLUSTERING,
 112        .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
 113        .max_sectors = 0xffff,
 114        .shost_attrs = fnic_attrs,
 115};
 116
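     /*
      * FC transport callback to set a remote port's dev_loss_tmo; a zero
      * timeout is clamped to the minimum value of 1.
      */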
 117static void
 118fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
 119{
 120        if (timeout)
 121                rport->dev_loss_tmo = timeout;
 122        else
 123                rport->dev_loss_tmo = 1;
 124}
 125
 126static void fnic_get_host_speed(struct Scsi_Host *shost);
 127static struct scsi_transport_template *fnic_fc_transport;
 128static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
 129
 130static struct fc_function_template fnic_fc_functions = {
 131
 132        .show_host_node_name = 1,
 133        .show_host_port_name = 1,
 134        .show_host_supported_classes = 1,
 135        .show_host_supported_fc4s = 1,
 136        .show_host_active_fc4s = 1,
 137        .show_host_maxframe_size = 1,
 138        .show_host_port_id = 1,
 139        .show_host_supported_speeds = 1,
 140        .get_host_speed = fnic_get_host_speed,
 141        .show_host_speed = 1,
 142        .show_host_port_type = 1,
 143        .get_host_port_state = fc_get_host_port_state,
 144        .show_host_port_state = 1,
 145        .show_host_symbolic_name = 1,
 146        .show_rport_maxframe_size = 1,
 147        .show_rport_supported_classes = 1,
 148        .show_host_fabric_name = 1,
 149        .show_starget_node_name = 1,
 150        .show_starget_port_name = 1,
 151        .show_starget_port_id = 1,
 152        .show_rport_dev_loss_tmo = 1,
 153        .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
 154        .issue_fc_host_lip = fnic_reset,
 155        .get_fc_host_stats = fnic_get_stats,
 156        .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
 157        .terminate_rport_io = fnic_terminate_rport_io,
 158        .bsg_request = fc_lport_bsg_request,
 159};
 160
 161static void fnic_get_host_speed(struct Scsi_Host *shost)
 162{
 163        struct fc_lport *lp = shost_priv(shost);
 164        struct fnic *fnic = lport_priv(lp);
 165        u32 port_speed = vnic_dev_port_speed(fnic->vdev);
 166
 167        /* Add in other values as they get defined in fw */
 168        switch (port_speed) {
 169        case 10000:
 170                fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
 171                break;
 172        default:
 173                fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
 174                break;
 175        }
 176}
 177
 178static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
 179{
 180        int ret;
 181        struct fc_lport *lp = shost_priv(host);
 182        struct fnic *fnic = lport_priv(lp);
 183        struct fc_host_statistics *stats = &lp->host_stats;
 184        struct vnic_stats *vs;
 185        unsigned long flags;
 186
 187        if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
 188                return stats;
 189        fnic->stats_time = jiffies;
 190
 191        spin_lock_irqsave(&fnic->fnic_lock, flags);
 192        ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
 193        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 194
 195        if (ret) {
 196                FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
 197                              "fnic: Get vnic stats failed"
  198                              " 0x%x\n", ret);
 199                return stats;
 200        }
 201        vs = fnic->stats;
 202        stats->tx_frames = vs->tx.tx_unicast_frames_ok;
 203        stats->tx_words  = vs->tx.tx_unicast_bytes_ok / 4;
 204        stats->rx_frames = vs->rx.rx_unicast_frames_ok;
 205        stats->rx_words  = vs->rx.rx_unicast_bytes_ok / 4;
 206        stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
 207        stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
 208        stats->invalid_crc_count = vs->rx.rx_crc_errors;
 209        stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
 210        stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
 211        stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
 212
 213        return stats;
 214}
 215
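     /*
      * Log the error status, if any, reported by each raw work queue,
      * receive queue and copy work queue of the device.
      */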
 216void fnic_log_q_error(struct fnic *fnic)
 217{
 218        unsigned int i;
 219        u32 error_status;
 220
 221        for (i = 0; i < fnic->raw_wq_count; i++) {
 222                error_status = ioread32(&fnic->wq[i].ctrl->error_status);
 223                if (error_status)
 224                        shost_printk(KERN_ERR, fnic->lport->host,
 225                                     "WQ[%d] error_status"
 226                                     " %d\n", i, error_status);
 227        }
 228
 229        for (i = 0; i < fnic->rq_count; i++) {
 230                error_status = ioread32(&fnic->rq[i].ctrl->error_status);
 231                if (error_status)
 232                        shost_printk(KERN_ERR, fnic->lport->host,
 233                                     "RQ[%d] error_status"
 234                                     " %d\n", i, error_status);
 235        }
 236
 237        for (i = 0; i < fnic->wq_copy_count; i++) {
 238                error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
 239                if (error_status)
 240                        shost_printk(KERN_ERR, fnic->lport->host,
 241                                     "CWQ[%d] error_status"
 242                                     " %d\n", i, error_status);
 243        }
 244}
 245
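     /*
      * Queue link-event processing on the fnic event workqueue, unless the
      * driver has stopped accepting rx/link events (e.g. during remove).
      */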
 246void fnic_handle_link_event(struct fnic *fnic)
 247{
 248        unsigned long flags;
 249
 250        spin_lock_irqsave(&fnic->fnic_lock, flags);
 251        if (fnic->stop_rx_link_events) {
 252                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 253                return;
 254        }
 255        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 256
 257        queue_work(fnic_event_queue, &fnic->link_work);
 258
 259}
 260
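     /*
      * Program the devcmd notify area according to the configured interrupt
      * mode (INTx, MSI or MSI-X).
      */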
 261static int fnic_notify_set(struct fnic *fnic)
 262{
 263        int err;
 264
 265        switch (vnic_dev_get_intr_mode(fnic->vdev)) {
 266        case VNIC_DEV_INTR_MODE_INTX:
 267                err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
 268                break;
 269        case VNIC_DEV_INTR_MODE_MSI:
 270                err = vnic_dev_notify_set(fnic->vdev, -1);
 271                break;
 272        case VNIC_DEV_INTR_MODE_MSIX:
 273                err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
 274                break;
 275        default:
 276                shost_printk(KERN_ERR, fnic->lport->host,
 277                             "Interrupt mode should be set up"
 278                             " before devcmd notify set %d\n",
 279                             vnic_dev_get_intr_mode(fnic->vdev));
 280                err = -1;
 281                break;
 282        }
 283
 284        return err;
 285}
 286
 287static void fnic_notify_timer(unsigned long data)
 288{
 289        struct fnic *fnic = (struct fnic *)data;
 290
 291        fnic_handle_link_event(fnic);
 292        mod_timer(&fnic->notify_timer,
 293                  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
 294}
 295
 296static void fnic_fip_notify_timer(unsigned long data)
 297{
 298        struct fnic *fnic = (struct fnic *)data;
 299
 300        fnic_handle_fip_timer(fnic);
 301}
 302
 303static void fnic_notify_timer_start(struct fnic *fnic)
 304{
 305        switch (vnic_dev_get_intr_mode(fnic->vdev)) {
 306        case VNIC_DEV_INTR_MODE_MSI:
 307                /*
 308                 * Schedule first timeout immediately. The driver is
  309                 * initialized and ready to look for link up notification
 310                 */
 311                mod_timer(&fnic->notify_timer, jiffies);
 312                break;
 313        default:
 314                /* Using intr for notification for INTx/MSI-X */
 315                break;
  316        }
 317}
 318
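     /*
      * Kick off a vnic_dev operation via 'start' and poll 'finished' in
      * 100 ms steps for at most two seconds.
      */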
 319static int fnic_dev_wait(struct vnic_dev *vdev,
 320                         int (*start)(struct vnic_dev *, int),
 321                         int (*finished)(struct vnic_dev *, int *),
 322                         int arg)
 323{
 324        unsigned long time;
 325        int done;
 326        int err;
 327
 328        err = start(vdev, arg);
 329        if (err)
 330                return err;
 331
 332        /* Wait for func to complete...2 seconds max */
 333        time = jiffies + (HZ * 2);
 334        do {
 335                err = finished(vdev, &done);
 336                if (err)
 337                        return err;
 338                if (done)
 339                        return 0;
 340                schedule_timeout_uninterruptible(HZ / 10);
 341        } while (time_after(time, jiffies));
 342
 343        return -ETIMEDOUT;
 344}
 345
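     /*
      * Quiesce the device: disable the vNIC and its queues, drain completed
      * WQ/RQ/copy-WQ entries, clean descriptors that never completed, and
      * free the per-fnic mempools.
      */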
 346static int fnic_cleanup(struct fnic *fnic)
 347{
 348        unsigned int i;
 349        int err;
 350
 351        vnic_dev_disable(fnic->vdev);
 352        for (i = 0; i < fnic->intr_count; i++)
 353                vnic_intr_mask(&fnic->intr[i]);
 354
 355        for (i = 0; i < fnic->rq_count; i++) {
 356                err = vnic_rq_disable(&fnic->rq[i]);
 357                if (err)
 358                        return err;
 359        }
 360        for (i = 0; i < fnic->raw_wq_count; i++) {
 361                err = vnic_wq_disable(&fnic->wq[i]);
 362                if (err)
 363                        return err;
 364        }
 365        for (i = 0; i < fnic->wq_copy_count; i++) {
 366                err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
 367                if (err)
 368                        return err;
 369        }
 370
 371        /* Clean up completed IOs and FCS frames */
 372        fnic_wq_copy_cmpl_handler(fnic, -1);
 373        fnic_wq_cmpl_handler(fnic, -1);
 374        fnic_rq_cmpl_handler(fnic, -1);
 375
 376        /* Clean up the IOs and FCS frames that have not completed */
 377        for (i = 0; i < fnic->raw_wq_count; i++)
 378                vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
 379        for (i = 0; i < fnic->rq_count; i++)
 380                vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
 381        for (i = 0; i < fnic->wq_copy_count; i++)
 382                vnic_wq_copy_clean(&fnic->wq_copy[i],
 383                                   fnic_wq_copy_cleanup_handler);
 384
 385        for (i = 0; i < fnic->cq_count; i++)
 386                vnic_cq_clean(&fnic->cq[i]);
 387        for (i = 0; i < fnic->intr_count; i++)
 388                vnic_intr_clean(&fnic->intr[i]);
 389
 390        mempool_destroy(fnic->io_req_pool);
 391        for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
 392                mempool_destroy(fnic->io_sgl_pool[i]);
 393
 394        return 0;
 395}
 396
 397static void fnic_iounmap(struct fnic *fnic)
 398{
 399        if (fnic->bar0.vaddr)
 400                iounmap(fnic->bar0.vaddr);
 401}
 402
 403/**
 404 * fnic_get_mac() - get assigned data MAC address for FIP code.
 405 * @lport:      local port.
 406 */
 407static u8 *fnic_get_mac(struct fc_lport *lport)
 408{
 409        struct fnic *fnic = lport_priv(lport);
 410
 411        return fnic->data_src_addr;
 412}
 413
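     /*
      * Program vlan_id as the device default VLAN; installed below as the
      * fnic's set_vlan hook for the FIP path.
      */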
 414static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
 415{
 416        u16 old_vlan;
 417        old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
 418}
 419
 420static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 421{
 422        struct Scsi_Host *host;
 423        struct fc_lport *lp;
 424        struct fnic *fnic;
 425        mempool_t *pool;
 426        int err;
 427        int i;
 428        unsigned long flags;
 429
 430        /*
 431         * Allocate SCSI Host and set up association between host,
 432         * local port, and fnic
 433         */
 434        lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
 435        if (!lp) {
 436                printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
 437                err = -ENOMEM;
 438                goto err_out;
 439        }
 440        host = lp->host;
 441        fnic = lport_priv(lp);
 442        fnic->lport = lp;
 443        fnic->ctlr.lp = lp;
 444
 445        snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
 446                 host->host_no);
 447
 448        host->transportt = fnic_fc_transport;
 449
 450        err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
 451        if (err) {
 452                shost_printk(KERN_ERR, fnic->lport->host,
 453                             "Unable to alloc shared tag map\n");
 454                goto err_out_free_hba;
 455        }
 456
 457        /* Setup PCI resources */
 458        pci_set_drvdata(pdev, fnic);
 459
 460        fnic->pdev = pdev;
 461
 462        err = pci_enable_device(pdev);
 463        if (err) {
 464                shost_printk(KERN_ERR, fnic->lport->host,
 465                             "Cannot enable PCI device, aborting.\n");
 466                goto err_out_free_hba;
 467        }
 468
 469        err = pci_request_regions(pdev, DRV_NAME);
 470        if (err) {
 471                shost_printk(KERN_ERR, fnic->lport->host,
 472                             "Cannot enable PCI resources, aborting\n");
 473                goto err_out_disable_device;
 474        }
 475
 476        pci_set_master(pdev);
 477
  478        /* Query the PCI controller for the device's DMA addressing
  479         * limitation.  Try a 40-bit mask first, and fall back
  480         * to 32-bit.
  481         */
 482        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
 483        if (err) {
 484                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 485                if (err) {
 486                        shost_printk(KERN_ERR, fnic->lport->host,
 487                                     "No usable DMA configuration "
 488                                     "aborting\n");
 489                        goto err_out_release_regions;
 490                }
 491                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 492                if (err) {
 493                        shost_printk(KERN_ERR, fnic->lport->host,
 494                                     "Unable to obtain 32-bit DMA "
 495                                     "for consistent allocations, aborting.\n");
 496                        goto err_out_release_regions;
 497                }
 498        } else {
 499                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
 500                if (err) {
 501                        shost_printk(KERN_ERR, fnic->lport->host,
 502                                     "Unable to obtain 40-bit DMA "
 503                                     "for consistent allocations, aborting.\n");
 504                        goto err_out_release_regions;
 505                }
 506        }
 507
 508        /* Map vNIC resources from BAR0 */
 509        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 510                shost_printk(KERN_ERR, fnic->lport->host,
 511                             "BAR0 not memory-map'able, aborting.\n");
 512                err = -ENODEV;
 513                goto err_out_release_regions;
 514        }
 515
 516        fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
 517        fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
 518        fnic->bar0.len = pci_resource_len(pdev, 0);
 519
 520        if (!fnic->bar0.vaddr) {
 521                shost_printk(KERN_ERR, fnic->lport->host,
 522                             "Cannot memory-map BAR0 res hdr, "
 523                             "aborting.\n");
 524                err = -ENODEV;
 525                goto err_out_release_regions;
 526        }
 527
 528        fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
 529        if (!fnic->vdev) {
 530                shost_printk(KERN_ERR, fnic->lport->host,
 531                             "vNIC registration failed, "
 532                             "aborting.\n");
 533                err = -ENODEV;
 534                goto err_out_iounmap;
 535        }
 536
 537        err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
 538                            vnic_dev_open_done, 0);
 539        if (err) {
 540                shost_printk(KERN_ERR, fnic->lport->host,
 541                             "vNIC dev open failed, aborting.\n");
 542                goto err_out_vnic_unregister;
 543        }
 544
 545        err = vnic_dev_init(fnic->vdev, 0);
 546        if (err) {
 547                shost_printk(KERN_ERR, fnic->lport->host,
 548                             "vNIC dev init failed, aborting.\n");
 549                goto err_out_dev_close;
 550        }
 551
 552        err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
 553        if (err) {
 554                shost_printk(KERN_ERR, fnic->lport->host,
  555                             "vNIC get MAC addr failed\n");
 556                goto err_out_dev_close;
 557        }
 558        /* set data_src for point-to-point mode and to keep it non-zero */
 559        memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);
 560
 561        /* Get vNIC configuration */
 562        err = fnic_get_vnic_config(fnic);
 563        if (err) {
 564                shost_printk(KERN_ERR, fnic->lport->host,
 565                             "Get vNIC configuration failed, "
 566                             "aborting.\n");
 567                goto err_out_dev_close;
 568        }
 569        host->max_lun = fnic->config.luns_per_tgt;
 570        host->max_id = FNIC_MAX_FCP_TARGET;
 571        host->max_cmd_len = FCOE_MAX_CMD_LEN;
 572
 573        fnic_get_res_counts(fnic);
 574
 575        err = fnic_set_intr_mode(fnic);
 576        if (err) {
 577                shost_printk(KERN_ERR, fnic->lport->host,
 578                             "Failed to set intr mode, "
 579                             "aborting.\n");
 580                goto err_out_dev_close;
 581        }
 582
 583        err = fnic_alloc_vnic_resources(fnic);
 584        if (err) {
 585                shost_printk(KERN_ERR, fnic->lport->host,
 586                             "Failed to alloc vNIC resources, "
 587                             "aborting.\n");
 588                goto err_out_clear_intr;
 589        }
 590
 591
 592        /* initialize all fnic locks */
 593        spin_lock_init(&fnic->fnic_lock);
 594
 595        for (i = 0; i < FNIC_WQ_MAX; i++)
 596                spin_lock_init(&fnic->wq_lock[i]);
 597
 598        for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
 599                spin_lock_init(&fnic->wq_copy_lock[i]);
 600                fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
 601                fnic->fw_ack_recd[i] = 0;
 602                fnic->fw_ack_index[i] = -1;
 603        }
 604
 605        for (i = 0; i < FNIC_IO_LOCKS; i++)
 606                spin_lock_init(&fnic->io_req_lock[i]);
 607
 608        fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
 609        if (!fnic->io_req_pool)
 610                goto err_out_free_resources;
 611
 612        pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
 613        if (!pool)
 614                goto err_out_free_ioreq_pool;
 615        fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
 616
 617        pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
 618        if (!pool)
 619                goto err_out_free_dflt_pool;
 620        fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
 621
 622        /* setup vlan config, hw inserts vlan header */
 623        fnic->vlan_hw_insert = 1;
 624        fnic->vlan_id = 0;
 625
 626        /* Initialize the FIP fcoe_ctrl struct */
 627        fnic->ctlr.send = fnic_eth_send;
 628        fnic->ctlr.update_mac = fnic_update_mac;
 629        fnic->ctlr.get_src_addr = fnic_get_mac;
 630        if (fnic->config.flags & VFCF_FIP_CAPABLE) {
 631                shost_printk(KERN_INFO, fnic->lport->host,
 632                             "firmware supports FIP\n");
 633                /* enable directed and multicast */
 634                vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
 635                vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
 636                vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
 637                fnic->set_vlan = fnic_set_vlan;
 638                fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
 639                setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
 640                                                        (unsigned long)fnic);
 641                spin_lock_init(&fnic->vlans_lock);
 642                INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
 643                INIT_WORK(&fnic->event_work, fnic_handle_event);
 644                skb_queue_head_init(&fnic->fip_frame_queue);
 645                spin_lock_irqsave(&fnic_list_lock, flags);
 646                if (!fnic_fip_queue) {
 647                        fnic_fip_queue =
 648                                create_singlethread_workqueue("fnic_fip_q");
 649                        if (!fnic_fip_queue) {
 650                                spin_unlock_irqrestore(&fnic_list_lock, flags);
 651                                printk(KERN_ERR PFX "fnic FIP work queue "
 652                                                 "create failed\n");
 653                                err = -ENOMEM;
 654                                goto err_out_free_max_pool;
 655                        }
 656                }
 657                spin_unlock_irqrestore(&fnic_list_lock, flags);
 658                INIT_LIST_HEAD(&fnic->evlist);
 659                INIT_LIST_HEAD(&fnic->vlans);
 660        } else {
 661                shost_printk(KERN_INFO, fnic->lport->host,
 662                             "firmware uses non-FIP mode\n");
 663                fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
 664        }
 665        fnic->state = FNIC_IN_FC_MODE;
 666
 667        atomic_set(&fnic->in_flight, 0);
 668        fnic->state_flags = FNIC_FLAGS_NONE;
 669
 670        /* Enable hardware stripping of vlan header on ingress */
 671        fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
 672
 673        /* Setup notification buffer area */
 674        err = fnic_notify_set(fnic);
 675        if (err) {
 676                shost_printk(KERN_ERR, fnic->lport->host,
 677                             "Failed to alloc notify buffer, aborting.\n");
 678                goto err_out_free_max_pool;
 679        }
 680
 681        /* Setup notify timer when using MSI interrupts */
 682        if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
 683                setup_timer(&fnic->notify_timer,
 684                            fnic_notify_timer, (unsigned long)fnic);
 685
  686        /* allocate RQ buffers and post them to RQ */
 687        for (i = 0; i < fnic->rq_count; i++) {
 688                err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
 689                if (err) {
 690                        shost_printk(KERN_ERR, fnic->lport->host,
 691                                     "fnic_alloc_rq_frame can't alloc "
 692                                     "frame\n");
 693                        goto err_out_free_rq_buf;
 694                }
 695        }
 696
 697        /*
 698         * Initialization done with PCI system, hardware, firmware.
 699         * Add host to SCSI
 700         */
 701        err = scsi_add_host(lp->host, &pdev->dev);
 702        if (err) {
 703                shost_printk(KERN_ERR, fnic->lport->host,
 704                             "fnic: scsi_add_host failed...exiting\n");
 705                goto err_out_free_rq_buf;
 706        }
 707
  708        /* Start local port initialization */
 709
 710        lp->link_up = 0;
 711
 712        lp->max_retry_count = fnic->config.flogi_retries;
 713        lp->max_rport_retry_count = fnic->config.plogi_retries;
 714        lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
 715                              FCP_SPPF_CONF_COMPL);
 716        if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
 717                lp->service_params |= FCP_SPPF_RETRY;
 718
 719        lp->boot_time = jiffies;
 720        lp->e_d_tov = fnic->config.ed_tov;
 721        lp->r_a_tov = fnic->config.ra_tov;
 722        lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
 723        fc_set_wwnn(lp, fnic->config.node_wwn);
 724        fc_set_wwpn(lp, fnic->config.port_wwn);
 725
 726        fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);
 727
 728        if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
 729                               FCPIO_HOST_EXCH_RANGE_END, NULL)) {
 730                err = -ENOMEM;
 731                goto err_out_remove_scsi_host;
 732        }
 733
 734        fc_lport_init_stats(lp);
 735
 736        fc_lport_config(lp);
 737
 738        if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
 739                       sizeof(struct fc_frame_header))) {
 740                err = -EINVAL;
 741                goto err_out_free_exch_mgr;
 742        }
 743        fc_host_maxframe_size(lp->host) = lp->mfs;
 744        fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;
 745
 746        sprintf(fc_host_symbolic_name(lp->host),
 747                DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
 748
 749        spin_lock_irqsave(&fnic_list_lock, flags);
 750        list_add_tail(&fnic->list, &fnic_list);
 751        spin_unlock_irqrestore(&fnic_list_lock, flags);
 752
 753        INIT_WORK(&fnic->link_work, fnic_handle_link);
 754        INIT_WORK(&fnic->frame_work, fnic_handle_frame);
 755        skb_queue_head_init(&fnic->frame_queue);
 756        skb_queue_head_init(&fnic->tx_queue);
 757
 758        /* Enable all queues */
 759        for (i = 0; i < fnic->raw_wq_count; i++)
 760                vnic_wq_enable(&fnic->wq[i]);
 761        for (i = 0; i < fnic->rq_count; i++)
 762                vnic_rq_enable(&fnic->rq[i]);
 763        for (i = 0; i < fnic->wq_copy_count; i++)
 764                vnic_wq_copy_enable(&fnic->wq_copy[i]);
 765
 766        fc_fabric_login(lp);
 767
 768        vnic_dev_enable(fnic->vdev);
 769
 770        err = fnic_request_intr(fnic);
 771        if (err) {
 772                shost_printk(KERN_ERR, fnic->lport->host,
 773                             "Unable to request irq.\n");
 774                goto err_out_free_exch_mgr;
 775        }
 776
 777        for (i = 0; i < fnic->intr_count; i++)
 778                vnic_intr_unmask(&fnic->intr[i]);
 779
 780        fnic_notify_timer_start(fnic);
 781
 782        return 0;
 783
 784err_out_free_exch_mgr:
 785        fc_exch_mgr_free(lp);
 786err_out_remove_scsi_host:
 787        fc_remove_host(lp->host);
 788        scsi_remove_host(lp->host);
 789err_out_free_rq_buf:
 790        for (i = 0; i < fnic->rq_count; i++)
 791                vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
 792        vnic_dev_notify_unset(fnic->vdev);
 793err_out_free_max_pool:
 794        mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
 795err_out_free_dflt_pool:
 796        mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
 797err_out_free_ioreq_pool:
 798        mempool_destroy(fnic->io_req_pool);
 799err_out_free_resources:
 800        fnic_free_vnic_resources(fnic);
 801err_out_clear_intr:
 802        fnic_clear_intr_mode(fnic);
 803err_out_dev_close:
 804        vnic_dev_close(fnic->vdev);
 805err_out_vnic_unregister:
 806        vnic_dev_unregister(fnic->vdev);
 807err_out_iounmap:
 808        fnic_iounmap(fnic);
 809err_out_release_regions:
 810        pci_release_regions(pdev);
 811err_out_disable_device:
 812        pci_disable_device(pdev);
 813err_out_free_hba:
 814        scsi_host_put(lp->host);
 815err_out:
 816        return err;
 817}
 818
 819static void fnic_remove(struct pci_dev *pdev)
 820{
 821        struct fnic *fnic = pci_get_drvdata(pdev);
 822        struct fc_lport *lp = fnic->lport;
 823        unsigned long flags;
 824
 825        /*
 826         * Mark state so that the workqueue thread stops forwarding
 827         * received frames and link events to the local port. ISR and
 828         * other threads that can queue work items will also stop
 829         * creating work items on the fnic workqueue
 830         */
 831        spin_lock_irqsave(&fnic->fnic_lock, flags);
 832        fnic->stop_rx_link_events = 1;
 833        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 834
 835        if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
 836                del_timer_sync(&fnic->notify_timer);
 837
 838        /*
 839         * Flush the fnic event queue. After this call, there should
 840         * be no event queued for this fnic device in the workqueue
 841         */
 842        flush_workqueue(fnic_event_queue);
 843        skb_queue_purge(&fnic->frame_queue);
 844        skb_queue_purge(&fnic->tx_queue);
 845
 846        if (fnic->config.flags & VFCF_FIP_CAPABLE) {
 847                del_timer_sync(&fnic->fip_timer);
 848                skb_queue_purge(&fnic->fip_frame_queue);
 849                fnic_fcoe_reset_vlans(fnic);
 850                fnic_fcoe_evlist_free(fnic);
 851        }
 852
 853        /*
  854         * Log off the fabric. This stops all remote ports and the dns
  855         * port, and flushes all rport, disc and lport work before
  856         * returning.
 857         */
 858        fc_fabric_logoff(fnic->lport);
 859
 860        spin_lock_irqsave(&fnic->fnic_lock, flags);
 861        fnic->in_remove = 1;
 862        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 863
 864        fcoe_ctlr_destroy(&fnic->ctlr);
 865        fc_lport_destroy(lp);
 866
 867        /*
 868         * This stops the fnic device, masks all interrupts. Completed
 869         * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
 870         * cleaned up
 871         */
 872        fnic_cleanup(fnic);
 873
 874        BUG_ON(!skb_queue_empty(&fnic->frame_queue));
 875        BUG_ON(!skb_queue_empty(&fnic->tx_queue));
 876
 877        spin_lock_irqsave(&fnic_list_lock, flags);
 878        list_del(&fnic->list);
 879        spin_unlock_irqrestore(&fnic_list_lock, flags);
 880
 881        fc_remove_host(fnic->lport->host);
 882        scsi_remove_host(fnic->lport->host);
 883        fc_exch_mgr_free(fnic->lport);
 884        vnic_dev_notify_unset(fnic->vdev);
 885        fnic_free_intr(fnic);
 886        fnic_free_vnic_resources(fnic);
 887        fnic_clear_intr_mode(fnic);
 888        vnic_dev_close(fnic->vdev);
 889        vnic_dev_unregister(fnic->vdev);
 890        fnic_iounmap(fnic);
 891        pci_release_regions(pdev);
 892        pci_disable_device(pdev);
 893        pci_set_drvdata(pdev, NULL);
 894        scsi_host_put(lp->host);
 895}
 896
 897static struct pci_driver fnic_driver = {
 898        .name = DRV_NAME,
 899        .id_table = fnic_id_table,
 900        .probe = fnic_probe,
 901        .remove = fnic_remove,
 902};
 903
 904static int __init fnic_init_module(void)
 905{
 906        size_t len;
 907        int err = 0;
 908
 909        printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
 910
 911        /* Allocate memory for trace buffer */
 912        err = fnic_trace_buf_init();
 913        if (err < 0) {
  914                printk(KERN_ERR PFX "Trace buffer initialization failed, "
  915                                  "fnic tracing utility is disabled\n");
 916                fnic_trace_free();
 917        }
 918
 919        /* Create a cache for allocation of default size sgls */
 920        len = sizeof(struct fnic_dflt_sgl_list);
 921        fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
 922                ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
 923                 SLAB_HWCACHE_ALIGN,
 924                 NULL);
 925        if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
 926                printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
 927                err = -ENOMEM;
 928                goto err_create_fnic_sgl_slab_dflt;
 929        }
 930
 931        /* Create a cache for allocation of max size sgls*/
 932        len = sizeof(struct fnic_sgl_list);
 933        fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
 934                ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
 935                  SLAB_HWCACHE_ALIGN,
 936                  NULL);
 937        if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
 938                printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
 939                err = -ENOMEM;
 940                goto err_create_fnic_sgl_slab_max;
 941        }
 942
 943        /* Create a cache of io_req structs for use via mempool */
 944        fnic_io_req_cache = kmem_cache_create("fnic_io_req",
 945                                              sizeof(struct fnic_io_req),
 946                                              0, SLAB_HWCACHE_ALIGN, NULL);
 947        if (!fnic_io_req_cache) {
 948                printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
 949                err = -ENOMEM;
 950                goto err_create_fnic_ioreq_slab;
 951        }
 952
 953        fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
 954        if (!fnic_event_queue) {
 955                printk(KERN_ERR PFX "fnic work queue create failed\n");
 956                err = -ENOMEM;
 957                goto err_create_fnic_workq;
 958        }
 959
 960        spin_lock_init(&fnic_list_lock);
 961        INIT_LIST_HEAD(&fnic_list);
 962
 963        fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
 964        if (!fnic_fc_transport) {
 965                printk(KERN_ERR PFX "fc_attach_transport error\n");
 966                err = -ENOMEM;
 967                goto err_fc_transport;
 968        }
 969
 970        /* register the driver with PCI system */
 971        err = pci_register_driver(&fnic_driver);
 972        if (err < 0) {
 973                printk(KERN_ERR PFX "pci register error\n");
 974                goto err_pci_register;
 975        }
 976        return err;
 977
 978err_pci_register:
 979        fc_release_transport(fnic_fc_transport);
 980err_fc_transport:
 981        destroy_workqueue(fnic_event_queue);
 982err_create_fnic_workq:
 983        kmem_cache_destroy(fnic_io_req_cache);
 984err_create_fnic_ioreq_slab:
 985        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
 986err_create_fnic_sgl_slab_max:
 987        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
 988err_create_fnic_sgl_slab_dflt:
 989        fnic_trace_free();
 990        return err;
 991}
 992
 993static void __exit fnic_cleanup_module(void)
 994{
 995        pci_unregister_driver(&fnic_driver);
 996        destroy_workqueue(fnic_event_queue);
 997        if (fnic_fip_queue) {
 998                flush_workqueue(fnic_fip_queue);
 999                destroy_workqueue(fnic_fip_queue);
1000        }
1001        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
1002        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
1003        kmem_cache_destroy(fnic_io_req_cache);
1004        fc_release_transport(fnic_fc_transport);
1005        fnic_trace_free();
1006}
1007
1008module_init(fnic_init_module);
1009module_exit(fnic_cleanup_module);
1010
1011