/* linux/drivers/scsi/csiostor/csio_init.c */
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/export.h>

#include "csio_init.h"
#include "csio_defs.h"

#define CSIO_MIN_MEMPOOL_SZ     64

static struct dentry *csio_debugfs_root;

static struct scsi_transport_template *csio_fcoe_transport;
static struct scsi_transport_template *csio_fcoe_transport_vport;

/*
 * debugfs support
 */
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
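        /*
         * file->private_data is the csio_hw pointer tagged with a memory
         * index by csio_add_debugfs_mem() below: the low two bits select
         * MEM_EDC0/MEM_EDC1/MEM_MC, and the remainder is the hw pointer
         * itself.
         */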
        loff_t pos = *ppos;
        loff_t avail = file_inode(file)->i_size;
        unsigned int mem = (uintptr_t)file->private_data & 3;
        struct csio_hw *hw = file->private_data - mem;

        if (pos < 0)
                return -EINVAL;
        if (pos >= avail)
                return 0;
        if (count > avail - pos)
                count = avail - pos;

        while (count) {
                size_t len;
                int ret, ofst;
                __be32 data[16];

                if (mem == MEM_MC)
                        ret = hw->chip_ops->chip_mc_read(hw, 0, pos,
                                                         data, NULL);
                else
                        ret = hw->chip_ops->chip_edc_read(hw, mem, pos,
                                                          data, NULL);
                if (ret)
                        return ret;

                ofst = pos % sizeof(data);
                len = min(count, sizeof(data) - ofst);
                if (copy_to_user(buf, (u8 *)data + ofst, len))
                        return -EFAULT;

                buf += len;
                pos += len;
                count -= len;
        }
        count = pos - *ppos;
        *ppos = pos;
        return count;
}

static const struct file_operations csio_mem_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = simple_open,
        .read    = csio_mem_read,
        .llseek  = default_llseek,
};

void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
                                 unsigned int idx, unsigned int size_mb)
{
        struct dentry *de;

        de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
                                 (void *)hw + idx, &csio_mem_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = size_mb << 20;
}
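
/*
 * Note: the open-coded i_size fixup above predates debugfs_create_file_size();
 * on kernels that provide that helper, the whole body reduces to a single
 * call (a hedged sketch, not the driver's original code):
 *
 *      debugfs_create_file_size(name, S_IRUSR, hw->debugfs_root,
 *                               (void *)hw + idx, &csio_mem_debugfs_fops,
 *                               size_mb << 20);
 */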

static int csio_setup_debugfs(struct csio_hw *hw)
{
        int i;

        if (IS_ERR_OR_NULL(hw->debugfs_root))
                return -1;

        i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
        if (i & EDRAM0_ENABLE)
                csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
        if (i & EDRAM1_ENABLE)
                csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);

        hw->chip_ops->chip_dfs_create_ext_mem(hw);
        return 0;
}

/*
 * csio_dfs_create - Creates and sets up per-hw debugfs.
 *
 */
static int
csio_dfs_create(struct csio_hw *hw)
{
        if (csio_debugfs_root) {
                hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
                                                        csio_debugfs_root);
                csio_setup_debugfs(hw);
        }

        return 0;
}

/*
 * csio_dfs_destroy - Destroys per-hw debugfs.
 */
static int
csio_dfs_destroy(struct csio_hw *hw)
{
        if (hw->debugfs_root)
                debugfs_remove_recursive(hw->debugfs_root);

        return 0;
}

/*
 * csio_dfs_init - Debug filesystem initialization for the module.
 *
 */
static int
csio_dfs_init(void)
{
        csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
        if (!csio_debugfs_root)
                pr_warn("Could not create debugfs entry, continuing\n");

        return 0;
}

/*
 * csio_dfs_exit - debugfs cleanup for the module.
 */
static void
csio_dfs_exit(void)
{
        debugfs_remove(csio_debugfs_root);
}

/*
 * csio_pci_init - PCI initialization.
 * @pdev: PCI device.
 * @bars: Bitmask of bars to be requested.
 *
 * Initializes the PCI function by enabling MMIO, setting bus
 * mastership and setting DMA mask.
 */
static int
csio_pci_init(struct pci_dev *pdev, int *bars)
{
        int rv = -ENODEV;

        *bars = pci_select_bars(pdev, IORESOURCE_MEM);

        if (pci_enable_device_mem(pdev))
                goto err;

        if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
                goto err_disable_device;

        pci_set_master(pdev);
        pci_try_set_mwi(pdev);

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        } else {
                dev_err(&pdev->dev, "No suitable DMA available.\n");
                goto err_release_regions;
        }

        return 0;

err_release_regions:
        pci_release_selected_regions(pdev, *bars);
err_disable_device:
        pci_disable_device(pdev);
err:
        return rv;
}
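
/*
 * On newer kernels the paired pci_set_dma_mask()/pci_set_consistent_dma_mask()
 * calls above are typically written with the combined DMA-API helper instead;
 * a hedged equivalent sketch, not the original driver code:
 *
 *      if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *          dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
 *              dev_err(&pdev->dev, "No suitable DMA available.\n");
 *              goto err_release_regions;
 *      }
 */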

/*
 * csio_pci_exit - PCI uninitialization.
 * @pdev: PCI device.
 * @bars: Bars to be released.
 *
 */
static void
csio_pci_exit(struct pci_dev *pdev, int *bars)
{
        pci_release_selected_regions(pdev, *bars);
        pci_disable_device(pdev);
}

/*
 * csio_hw_init_workers - Initialize the HW module's worker threads.
 * @hw: HW module.
 *
 */
static void
csio_hw_init_workers(struct csio_hw *hw)
{
        INIT_WORK(&hw->evtq_work, csio_evtq_worker);
}

static void
csio_hw_exit_workers(struct csio_hw *hw)
{
        cancel_work_sync(&hw->evtq_work);
        flush_scheduled_work();
}

static int
csio_create_queues(struct csio_hw *hw)
{
        int i, j;
        struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
        int rv;
        struct csio_scsi_cpu_info *info;

        if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
                return 0;

        if (hw->intr_mode != CSIO_IM_MSIX) {
                rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
                                        0, hw->pport[0].portid, false, NULL);
                if (rv != 0) {
                        csio_err(hw, "Forward Interrupt IQ failed: %d\n", rv);
                        return rv;
                }
        }

        /* FW event queue */
        rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
                               csio_get_fwevt_intr_idx(hw),
                               hw->pport[0].portid, true, NULL);
        if (rv != 0) {
                csio_err(hw, "FW event IQ config failed: %d\n", rv);
                return rv;
        }

        /* Create mgmt queue */
        rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
                        mgmtm->iq_idx, hw->pport[0].portid, NULL);

        if (rv != 0) {
                csio_err(hw, "Mgmt EQ create failed: %d\n", rv);
                goto err;
        }

        /* Create SCSI queues */
        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];

                for (j = 0; j < info->max_cpus; j++) {
                        struct csio_scsi_qset *sqset = &hw->sqset[i][j];

                        rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
                                               sqset->intr_idx, i, false, NULL);
                        if (rv != 0) {
                                csio_err(hw,
                                   "SCSI module IQ config failed [%d][%d]:%d\n",
                                   i, j, rv);
                                goto err;
                        }
                        rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
                                               sqset->iq_idx, i, NULL);
                        if (rv != 0) {
                                csio_err(hw,
                                   "SCSI module EQ config failed [%d][%d]:%d\n",
                                   i, j, rv);
                                goto err;
                        }
                } /* for all CPUs */
        } /* For all ports */

        hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
        return 0;
err:
        csio_wr_destroy_queues(hw, true);
        return -EINVAL;
}

/*
 * csio_config_queues - Configure the DMA queues.
 * @hw: HW module.
 *
 * Allocates memory for the queues and registers them with the FW.
 */
int
csio_config_queues(struct csio_hw *hw)
{
        int i, j, idx, k = 0;
        int rv;
        struct csio_scsi_qset *sqset;
        struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
        struct csio_scsi_qset *orig;
        struct csio_scsi_cpu_info *info;

        if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
                return csio_create_queues(hw);

        /* Calculate number of SCSI queues for MSIX we would like */
        hw->num_scsi_msix_cpus = num_online_cpus();
        hw->num_sqsets = num_online_cpus() * hw->num_pports;

        if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
                hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
                hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
        }

        /* Initialize max_cpus, may get reduced during msix allocations */
        for (i = 0; i < hw->num_pports; i++)
                hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;

        csio_dbg(hw, "nsqsets:%d scpus:%d\n",
                    hw->num_sqsets, hw->num_scsi_msix_cpus);

        csio_intr_enable(hw);

        if (hw->intr_mode != CSIO_IM_MSIX) {

                /* Allocate Forward interrupt iq. */
                hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
                                                CSIO_INTR_WRSIZE, CSIO_INGRESS,
                                                (void *)hw, 0, 0, NULL);
                if (hw->intr_iq_idx == -1) {
                        csio_err(hw,
                                 "Forward interrupt queue creation failed\n");
                        goto intr_disable;
                }
        }

        /* Allocate the FW evt queue */
        hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
                                           CSIO_FWEVT_WRSIZE,
                                           CSIO_INGRESS, (void *)hw,
                                           CSIO_FWEVT_FLBUFS, 0,
                                           csio_fwevt_intx_handler);
        if (hw->fwevt_iq_idx == -1) {
                csio_err(hw, "FW evt queue creation failed\n");
                goto intr_disable;
        }

        /* Allocate the mgmt queue */
        mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
                                      CSIO_MGMT_EQ_WRSIZE,
                                      CSIO_EGRESS, (void *)hw, 0, 0, NULL);
        if (mgmtm->eq_idx == -1) {
                csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
                goto intr_disable;
        }

        /* Use FW IQ for MGMT req completion */
        mgmtm->iq_idx = hw->fwevt_iq_idx;

        /* Allocate SCSI queues */
        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];

                for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
                        sqset = &hw->sqset[i][j];

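                        /*
                         * max_cpus may have been trimmed when MSI-X vectors
                         * were allocated (see the note above); any qset
                         * beyond it simply aliases the queues of an earlier
                         * qset on this port rather than getting its own.
                         */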
                        if (j >= info->max_cpus) {
                                k = j % info->max_cpus;
                                orig = &hw->sqset[i][k];
                                sqset->eq_idx = orig->eq_idx;
                                sqset->iq_idx = orig->iq_idx;
                                continue;
                        }

                        idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
                                              CSIO_EGRESS, (void *)hw, 0, 0,
                                              NULL);
                        if (idx == -1) {
                                csio_err(hw, "EQ creation failed for idx:%d\n",
                                            idx);
                                goto intr_disable;
                        }

                        sqset->eq_idx = idx;

                        idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
                                             CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
                                             (void *)hw, 0, 0,
                                             csio_scsi_intx_handler);
                        if (idx == -1) {
                                csio_err(hw, "IQ creation failed for idx:%d\n",
                                            idx);
                                goto intr_disable;
                        }
                        sqset->iq_idx = idx;
                } /* for all CPUs */
        } /* For all ports */

        hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;

        rv = csio_create_queues(hw);
        if (rv != 0)
                goto intr_disable;

        /*
         * Now request IRQs for the vectors. In the event of a failure,
         * cleanup is handled internally by this function.
         */
        rv = csio_request_irqs(hw);
        if (rv != 0)
                return -EINVAL;

        return 0;

intr_disable:
        csio_intr_disable(hw, false);

        return -EINVAL;
}

static int
csio_resource_alloc(struct csio_hw *hw)
{
        struct csio_wrm *wrm = csio_hw_to_wrm(hw);
        int rv = -ENOMEM;

        wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
                       CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);

        hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
                                                  sizeof(struct csio_mb));
        if (!hw->mb_mempool)
                goto err;

        hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
                                                     sizeof(struct csio_rnode));
        if (!hw->rnode_mempool)
                goto err_free_mb_mempool;

        hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
                                            CSIO_SCSI_RSP_LEN, 8, 0);
        if (!hw->scsi_pci_pool)
                goto err_free_rn_pool;

        return 0;

err_free_rn_pool:
        mempool_destroy(hw->rnode_mempool);
        hw->rnode_mempool = NULL;
err_free_mb_mempool:
        mempool_destroy(hw->mb_mempool);
        hw->mb_mempool = NULL;
err:
        return rv;
}
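
/*
 * pci_pool_create() above is the legacy PCI wrapper around the generic DMA
 * pool API; where the wrapper is unavailable, the equivalent call is (a
 * hedged sketch, not the original driver code):
 *
 *      hw->scsi_pci_pool = dma_pool_create("csio_scsi_pci_pool",
 *                                          &hw->pdev->dev,
 *                                          CSIO_SCSI_RSP_LEN, 8, 0);
 */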

static void
csio_resource_free(struct csio_hw *hw)
{
        pci_pool_destroy(hw->scsi_pci_pool);
        hw->scsi_pci_pool = NULL;
        mempool_destroy(hw->rnode_mempool);
        hw->rnode_mempool = NULL;
        mempool_destroy(hw->mb_mempool);
        hw->mb_mempool = NULL;
}

/*
 * csio_hw_alloc - Allocate and initialize the HW module.
 * @pdev: PCI device.
 *
 * Allocates HW structure, DMA, memory resources, maps BARS to
 * host memory and initializes HW module.
 */
static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
{
        struct csio_hw *hw;

        hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
        if (!hw)
                goto err;

        hw->pdev = pdev;
        strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);

        /* memory pool/DMA pool allocation */
        if (csio_resource_alloc(hw))
                goto err_free_hw;

        /* Get the start address of registers from BAR 0 */
        hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
                                       pci_resource_len(pdev, 0));
        if (!hw->regstart) {
                csio_err(hw, "Could not map BAR 0, regstart = %p\n",
                         hw->regstart);
                goto err_resource_free;
        }

        csio_hw_init_workers(hw);

        if (csio_hw_init(hw))
                goto err_unmap_bar;

        csio_dfs_create(hw);

        csio_dbg(hw, "hw:%p\n", hw);

        return hw;

err_unmap_bar:
        csio_hw_exit_workers(hw);
        iounmap(hw->regstart);
err_resource_free:
        csio_resource_free(hw);
err_free_hw:
        kfree(hw);
err:
        return NULL;
}
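
/*
 * Portability note (an assumption about later kernels, not original driver
 * text): ioremap_nocache() was eventually removed in favor of plain
 * ioremap(), which is uncached by default, and the strncpy() of the version
 * string above would typically be written with strscpy() today.
 */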

/*
 * csio_hw_free - Uninitialize and free the HW module.
 * @hw: The HW module
 *
 * Disable interrupts, uninit the HW module, free resources, free hw.
 */
static void
csio_hw_free(struct csio_hw *hw)
{
        csio_intr_disable(hw, true);
        csio_hw_exit_workers(hw);
        csio_hw_exit(hw);
        iounmap(hw->regstart);
        csio_dfs_destroy(hw);
        csio_resource_free(hw);
        kfree(hw);
}

/**
 * csio_shost_init - Create and initialize the lnode module.
 * @hw:         The HW module.
 * @dev:        The device associated with this invocation.
 * @probe:      Called from probe context or not?
 * @pln:        Parent lnode if any.
 *
 * Allocates lnode structure via scsi_host_alloc, initializes
 * shost, initializes lnode module and registers with SCSI ML
 * via scsi_add_host. This function is shared between physical and
 * virtual node ports.
 */
struct csio_lnode *
csio_shost_init(struct csio_hw *hw, struct device *dev,
                  bool probe, struct csio_lnode *pln)
{
        struct Scsi_Host  *shost = NULL;
        struct csio_lnode *ln;

        csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
        csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;

        /*
         * hw->pdev is the physical port's PCI dev structure,
         * which will be different from the NPIV dev structure.
         */
        if (dev == &hw->pdev->dev)
                shost = scsi_host_alloc(
                                &csio_fcoe_shost_template,
                                sizeof(struct csio_lnode));
        else
                shost = scsi_host_alloc(
                                &csio_fcoe_shost_vport_template,
                                sizeof(struct csio_lnode));

        if (!shost)
                goto err;

        ln = shost_priv(shost);
        memset(ln, 0, sizeof(struct csio_lnode));

        /* Link common lnode to this lnode */
        ln->dev_num = (shost->host_no << 16);

        shost->can_queue = CSIO_MAX_QUEUE;
        shost->this_id = -1;
        shost->unique_id = shost->host_no;
        shost->max_cmd_len = 16; /* Max CDB length supported */
        shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
                              hw->fres_info.max_ssns);
        shost->max_lun = CSIO_MAX_LUN;
        if (dev == &hw->pdev->dev)
                shost->transportt = csio_fcoe_transport;
        else
                shost->transportt = csio_fcoe_transport_vport;

        /* root lnode */
        if (!hw->rln)
                hw->rln = ln;

        /* Other initialization here: Common, Transport specific */
        if (csio_lnode_init(ln, hw, pln))
                goto err_shost_put;

        if (scsi_add_host(shost, dev))
                goto err_lnode_exit;

        return ln;

err_lnode_exit:
        csio_lnode_exit(ln);
err_shost_put:
        scsi_host_put(shost);
err:
        return NULL;
}

/**
 * csio_shost_exit - De-instantiate the shost.
 * @ln:         The lnode module corresponding to the shost.
 *
 */
void
csio_shost_exit(struct csio_lnode *ln)
{
        struct Scsi_Host *shost = csio_ln_to_shost(ln);
        struct csio_hw *hw = csio_lnode_to_hw(ln);

        /* Inform transport */
        fc_remove_host(shost);

        /* Inform SCSI ML */
        scsi_remove_host(shost);

        /* Flush all the events, so that any rnode removal events
         * already queued are all handled, before we remove the lnode.
         */
        spin_lock_irq(&hw->lock);
        csio_evtq_flush(hw);
        spin_unlock_irq(&hw->lock);

        csio_lnode_exit(ln);
        scsi_host_put(shost);
}

struct csio_lnode *
csio_lnode_alloc(struct csio_hw *hw)
{
        return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
}

void
csio_lnodes_block_request(struct csio_hw *hw)
{
        struct Scsi_Host  *shost;
        struct csio_lnode *sln;
        struct csio_lnode *ln;
        struct list_head *cur_ln, *cur_cln;
        struct csio_lnode **lnode_list;
        int cur_cnt = 0, ii;

        lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
                        GFP_KERNEL);
        if (!lnode_list) {
                csio_err(hw, "Failed to allocate lnodes_list\n");
                return;
        }

        spin_lock_irq(&hw->lock);
        /* Traverse sibling lnodes */
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;
                lnode_list[cur_cnt++] = sln;

                /* Traverse children lnodes */
                list_for_each(cur_cln, &sln->cln_head)
                        lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
        }
        spin_unlock_irq(&hw->lock);

        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
                ln = lnode_list[ii];
                shost = csio_ln_to_shost(ln);
                scsi_block_requests(shost);
        }
        kfree(lnode_list);
}
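
/*
 * csio_lnodes_block_request() above and the three helpers below share one
 * pattern: snapshot the sibling and child lnodes into a flat array under
 * hw->lock, drop the lock, then block or unblock requests on each shost
 * without holding it, so the SCSI midlayer is never entered with the HW
 * spinlock held.
 */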

void
csio_lnodes_unblock_request(struct csio_hw *hw)
{
        struct csio_lnode *ln;
        struct Scsi_Host  *shost;
        struct csio_lnode *sln;
        struct list_head *cur_ln, *cur_cln;
        struct csio_lnode **lnode_list;
        int cur_cnt = 0, ii;

        lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
                        GFP_KERNEL);
        if (!lnode_list) {
                csio_err(hw, "Failed to allocate lnodes_list\n");
                return;
        }

        spin_lock_irq(&hw->lock);
        /* Traverse sibling lnodes */
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;
                lnode_list[cur_cnt++] = sln;

                /* Traverse children lnodes */
                list_for_each(cur_cln, &sln->cln_head)
                        lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
        }
        spin_unlock_irq(&hw->lock);

        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
                ln = lnode_list[ii];
                shost = csio_ln_to_shost(ln);
                scsi_unblock_requests(shost);
        }
        kfree(lnode_list);
}

void
csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
{
        struct csio_lnode *ln;
        struct Scsi_Host  *shost;
        struct csio_lnode *sln;
        struct list_head *cur_ln, *cur_cln;
        struct csio_lnode **lnode_list;
        int cur_cnt = 0, ii;

        lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
                        GFP_KERNEL);
        if (!lnode_list) {
                csio_err(hw, "Failed to allocate lnodes_list\n");
                return;
        }

        spin_lock_irq(&hw->lock);
        /* Traverse sibling lnodes */
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;
                if (sln->portid != portid)
                        continue;

                lnode_list[cur_cnt++] = sln;

                /* Traverse children lnodes */
                list_for_each(cur_cln, &sln->cln_head)
                        lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
        }
        spin_unlock_irq(&hw->lock);

        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
                ln = lnode_list[ii];
                shost = csio_ln_to_shost(ln);
                scsi_block_requests(shost);
        }
        kfree(lnode_list);
}

void
csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
{
        struct csio_lnode *ln;
        struct Scsi_Host  *shost;
        struct csio_lnode *sln;
        struct list_head *cur_ln, *cur_cln;
        struct csio_lnode **lnode_list;
        int cur_cnt = 0, ii;

        lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
                        GFP_KERNEL);
        if (!lnode_list) {
                csio_err(hw, "Failed to allocate lnodes_list\n");
                return;
        }

        spin_lock_irq(&hw->lock);
        /* Traverse sibling lnodes */
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;
                if (sln->portid != portid)
                        continue;
                lnode_list[cur_cnt++] = sln;

                /* Traverse children lnodes */
                list_for_each(cur_cln, &sln->cln_head)
                        lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
        }
        spin_unlock_irq(&hw->lock);

        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
                ln = lnode_list[ii];
                shost = csio_ln_to_shost(ln);
                scsi_unblock_requests(shost);
        }
        kfree(lnode_list);
}

void
csio_lnodes_exit(struct csio_hw *hw, bool npiv)
{
        struct csio_lnode *sln;
        struct csio_lnode *ln;
        struct list_head *cur_ln, *cur_cln;
        struct csio_lnode **lnode_list;
        int cur_cnt = 0, ii;

        lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
                        GFP_KERNEL);
        if (!lnode_list) {
                csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
                return;
        }

        /* Get all child lnodes(NPIV ports) */
        spin_lock_irq(&hw->lock);
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;

                /* Traverse children lnodes */
                list_for_each(cur_cln, &sln->cln_head)
                        lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
        }
        spin_unlock_irq(&hw->lock);

        /* Delete NPIV lnodes */
        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
                ln = lnode_list[ii];
                fc_vport_terminate(ln->fc_vport);
        }

        /* Delete only npiv lnodes */
        if (npiv)
                goto free_lnodes;

        cur_cnt = 0;
        /* Get all physical lnodes */
        spin_lock_irq(&hw->lock);
        /* Traverse sibling lnodes */
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;
                lnode_list[cur_cnt++] = sln;
        }
        spin_unlock_irq(&hw->lock);

        /* Delete physical lnodes */
        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
                csio_shost_exit(lnode_list[ii]);
        }

free_lnodes:
        kfree(lnode_list);
}

/*
 * csio_lnode_init_post: Set lnode attributes after starting HW.
 * @ln: lnode.
 *
 */
static void
csio_lnode_init_post(struct csio_lnode *ln)
{
        struct Scsi_Host  *shost = csio_ln_to_shost(ln);

        csio_fchost_attr_init(ln);

        scsi_scan_host(shost);
}

/*
 * csio_probe_one - Instantiate this PCI function.
 * @pdev: PCI device
 * @id: Device ID
 *
 * This is the .probe() callback of the driver. This function:
 * - Initializes the PCI function by enabling MMIO, setting bus
 *   mastership and setting DMA mask.
 * - Allocates HW structure, DMA, memory resources, maps BARS to
 *   host memory and initializes HW module.
 * - Allocates lnode structure via scsi_host_alloc, initializes
 *   shost, initializes lnode module and registers with SCSI ML
 *   via scsi_add_host.
 * - Enables interrupts, and starts the chip by kicking off the
 *   HW state machine.
 * - Once hardware is ready, initiates scan of the host via
 *   scsi_scan_host.
 */
static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int rv;
        int bars;
        int i;
        struct csio_hw *hw;
        struct csio_lnode *ln;

        rv = csio_pci_init(pdev, &bars);
        if (rv)
                goto err;

        hw = csio_hw_alloc(pdev);
        if (!hw) {
                rv = -ENODEV;
                goto err_pci_exit;
        }

        pci_set_drvdata(pdev, hw);

        if (csio_hw_start(hw) != 0) {
                dev_err(&pdev->dev,
                        "Failed to start FW, continuing in debug mode.\n");
                return 0;
        }

        sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
                    FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
                    FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
                    FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
                    FW_HDR_FW_VER_BUILD_GET(hw->fwrev));

        for (i = 0; i < hw->num_pports; i++) {
                ln = csio_shost_init(hw, &pdev->dev, true, NULL);
                if (!ln) {
                        rv = -ENODEV;
                        break;
                }
                /* Initialize portid */
                ln->portid = hw->pport[i].portid;

                spin_lock_irq(&hw->lock);
                if (csio_lnode_start(ln) != 0)
                        rv = -ENODEV;
                spin_unlock_irq(&hw->lock);

                if (rv)
                        break;

                csio_lnode_init_post(ln);
        }

        if (rv)
                goto err_lnode_exit;

        return 0;

err_lnode_exit:
        csio_lnodes_block_request(hw);
        spin_lock_irq(&hw->lock);
        csio_hw_stop(hw);
        spin_unlock_irq(&hw->lock);
        csio_lnodes_unblock_request(hw);
        pci_set_drvdata(hw->pdev, NULL);
        csio_lnodes_exit(hw, 0);
        csio_hw_free(hw);
err_pci_exit:
        csio_pci_exit(pdev, &bars);
err:
        dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
        return rv;
}

/*
 * csio_remove_one - Remove one instance of the driver at this PCI function.
 * @pdev: PCI device
 *
 * Used during hotplug operation.
 */
static void csio_remove_one(struct pci_dev *pdev)
{
        struct csio_hw *hw = pci_get_drvdata(pdev);
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);

        csio_lnodes_block_request(hw);
        spin_lock_irq(&hw->lock);

        /* Stops lnode, Rnode s/m
         * Quiesce IOs.
         * All sessions with remote ports are unregistered.
         */
        csio_hw_stop(hw);
        spin_unlock_irq(&hw->lock);
        csio_lnodes_unblock_request(hw);

        csio_lnodes_exit(hw, 0);
        csio_hw_free(hw);
        pci_set_drvdata(pdev, NULL);
        csio_pci_exit(pdev, &bars);
}

/*
 * csio_pci_error_detected - PCI error was detected
 * @pdev: PCI device
 *
 */
static pci_ers_result_t
csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
        struct csio_hw *hw = pci_get_drvdata(pdev);

        csio_lnodes_block_request(hw);
        spin_lock_irq(&hw->lock);

        /* Post PCI error detected evt to HW s/m
         * HW s/m handles this evt by quiescing IOs, unregisters rports
         * and finally takes the device to offline.
         */
        csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
        spin_unlock_irq(&hw->lock);
        csio_lnodes_unblock_request(hw);
        csio_lnodes_exit(hw, 0);
        csio_intr_disable(hw, true);
        pci_disable_device(pdev);
        return state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

/*
 * csio_pci_slot_reset - PCI slot has been reset.
 * @pdev: PCI device
 *
 */
static pci_ers_result_t
csio_pci_slot_reset(struct pci_dev *pdev)
{
        struct csio_hw *hw = pci_get_drvdata(pdev);
        int ready;

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);
        pci_cleanup_aer_uncorrect_error_status(pdev);

        /* Bring HW s/m to ready state.
         * but don't resume IOs.
         */
        spin_lock_irq(&hw->lock);
        csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
        ready = csio_is_hw_ready(hw);
        spin_unlock_irq(&hw->lock);

        if (ready) {
                return PCI_ERS_RESULT_RECOVERED;
        } else {
                dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
}

/*
 * csio_pci_resume - Resume normal operations
 * @pdev: PCI device
 *
 */
static void
csio_pci_resume(struct pci_dev *pdev)
{
        struct csio_hw *hw = pci_get_drvdata(pdev);
        struct csio_lnode *ln;
        int rv = 0;
        int i;

        /* Bring the LINK UP and Resume IO */

        for (i = 0; i < hw->num_pports; i++) {
                ln = csio_shost_init(hw, &pdev->dev, true, NULL);
                if (!ln) {
                        rv = -ENODEV;
                        break;
                }
                /* Initialize portid */
                ln->portid = hw->pport[i].portid;

                spin_lock_irq(&hw->lock);
                if (csio_lnode_start(ln) != 0)
                        rv = -ENODEV;
                spin_unlock_irq(&hw->lock);

                if (rv)
                        break;

                csio_lnode_init_post(ln);
        }

        if (rv)
                goto err_resume_exit;

        return;

err_resume_exit:
        csio_lnodes_block_request(hw);
        spin_lock_irq(&hw->lock);
        csio_hw_stop(hw);
        spin_unlock_irq(&hw->lock);
        csio_lnodes_unblock_request(hw);
        csio_lnodes_exit(hw, 0);
        csio_hw_free(hw);
        dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}

static struct pci_error_handlers csio_err_handler = {
        .error_detected = csio_pci_error_detected,
        .slot_reset     = csio_pci_slot_reset,
        .resume         = csio_pci_resume,
};
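
/*
 * Hedged note: later kernels declare these tables as
 * "static const struct pci_error_handlers"; the non-const form above
 * matches the kernel generation this file was written against.
 */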

static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
        CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0),        /* T4 DEBUG FCOE */
        CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0),         /* T420CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0),         /* T422CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0),         /* T440CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0),        /* T420BCH FCOE */
        CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0),        /* T440BCH FCOE */
        CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0),         /* T440CH FCOE */
        CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0),         /* T420SO FCOE */
        CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0),         /* T420CX FCOE */
        CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0),         /* T420BT FCOE */
        CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0),         /* T404BT FCOE */
        CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0),           /* B420 FCOE */
        CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0),           /* B404 FCOE */
        CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0),         /* T480 CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0),       /* T440 LP-CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T4_FCOE, 0),   /* AMSTERDAM T4 FCOE */
        CSIO_DEVICE(CSIO_DEVID_HUAWEI_T480_FCOE, 0),    /* HUAWEI T480 FCOE */
        CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_FCOE, 0),    /* HUAWEI T440 FCOE */
        CSIO_DEVICE(CSIO_DEVID_HUAWEI_STG310_FCOE, 0),  /* HUAWEI STG FCOE */
        CSIO_DEVICE(CSIO_DEVID_ACROMAG_XMC_XAUI, 0),    /* ACROMAG XAUI FCOE */
        CSIO_DEVICE(CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE, 0),/* QUANTA MEZZ FCOE */
        CSIO_DEVICE(CSIO_DEVID_HUAWEI_10GT_FCOE, 0),    /* HUAWEI 10GT FCOE */
        CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_TOE_FCOE, 0),/* HUAWEI T4 TOE FCOE */
        CSIO_DEVICE(CSIO_DEVID_T580DBG_FCOE, 0),        /* T5 DEBUG FCOE */
        CSIO_DEVICE(CSIO_DEVID_T520CR_FCOE, 0),         /* T520CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T522CR_FCOE, 0),         /* T522CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T540CR_FCOE, 0),         /* T540CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T520BCH_FCOE, 0),        /* T520BCH FCOE */
        CSIO_DEVICE(CSIO_DEVID_T540BCH_FCOE, 0),        /* T540BCH FCOE */
        CSIO_DEVICE(CSIO_DEVID_T540CH_FCOE, 0),         /* T540CH FCOE */
        CSIO_DEVICE(CSIO_DEVID_T520SO_FCOE, 0),         /* T520SO FCOE */
        CSIO_DEVICE(CSIO_DEVID_T520CX_FCOE, 0),         /* T520CX FCOE */
        CSIO_DEVICE(CSIO_DEVID_T520BT_FCOE, 0),         /* T520BT FCOE */
        CSIO_DEVICE(CSIO_DEVID_T504BT_FCOE, 0),         /* T504BT FCOE */
        CSIO_DEVICE(CSIO_DEVID_B520_FCOE, 0),           /* B520 FCOE */
        CSIO_DEVICE(CSIO_DEVID_B504_FCOE, 0),           /* B504 FCOE */
        CSIO_DEVICE(CSIO_DEVID_T580CR2_FCOE, 0),        /* T580 CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T540LPCR_FCOE, 0),       /* T540 LP-CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T5_FCOE, 0),   /* AMSTERDAM T5 FCOE */
        CSIO_DEVICE(CSIO_DEVID_T580LPCR_FCOE, 0),       /* T580 LP-CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T520LLCR_FCOE, 0),       /* T520 LL-CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T560CR_FCOE, 0),         /* T560 CR FCOE */
        CSIO_DEVICE(CSIO_DEVID_T580CR_FCOE, 0),         /* T580 CR FCOE */
        { 0, 0, 0, 0, 0, 0, 0 }
};
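
/*
 * DEFINE_PCI_DEVICE_TABLE() was later deprecated; the modern spelling of
 * the table header is simply (hedged sketch, entries unchanged):
 *
 *      static const struct pci_device_id csio_pci_tbl[] = { ... };
 */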

static struct pci_driver csio_pci_driver = {
        .name           = KBUILD_MODNAME,
        .driver         = {
                .owner  = THIS_MODULE,
        },
        .id_table       = csio_pci_tbl,
        .probe          = csio_probe_one,
        .remove         = csio_remove_one,
        .err_handler    = &csio_err_handler,
};

/*
 * csio_init - Chelsio storage driver initialization function.
 *
 */
static int __init
csio_init(void)
{
        int rv = -ENOMEM;

        pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);

        csio_dfs_init();

        csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
        if (!csio_fcoe_transport)
                goto err;

        csio_fcoe_transport_vport =
                        fc_attach_transport(&csio_fc_transport_vport_funcs);
        if (!csio_fcoe_transport_vport)
                goto err_vport;

        rv = pci_register_driver(&csio_pci_driver);
        if (rv)
                goto err_pci;

        return 0;

err_pci:
        fc_release_transport(csio_fcoe_transport_vport);
err_vport:
        fc_release_transport(csio_fcoe_transport);
err:
        csio_dfs_exit();
        return rv;
}

/*
 * csio_exit - Chelsio storage driver uninitialization.
 *
 * Function that gets called in the unload path.
 */
static void __exit
csio_exit(void)
{
        pci_unregister_driver(&csio_pci_driver);
        csio_dfs_exit();
        fc_release_transport(csio_fcoe_transport_vport);
        fc_release_transport(csio_fcoe_transport);
}

module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T4);
MODULE_FIRMWARE(FW_FNAME_T5);