/* linux/drivers/scsi/csiostor/csio_init.c */
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/export.h>

#include "csio_init.h"
#include "csio_defs.h"

#define CSIO_MIN_MEMPOOL_SZ     64

static struct dentry *csio_debugfs_root;

static struct scsi_transport_template *csio_fcoe_transport;
static struct scsi_transport_template *csio_fcoe_transport_vport;

/*
 * debugfs support
 */
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        loff_t pos = *ppos;
        loff_t avail = file_inode(file)->i_size;
        unsigned int mem = (uintptr_t)file->private_data & 3;
        struct csio_hw *hw = file->private_data - mem;

        if (pos < 0)
                return -EINVAL;
        if (pos >= avail)
                return 0;
        if (count > avail - pos)
                count = avail - pos;

        while (count) {
                size_t len;
                int ret, ofst;
                __be32 data[16];

                if (mem == MEM_MC)
                        ret = hw->chip_ops->chip_mc_read(hw, 0, pos,
                                                         data, NULL);
                else
                        ret = hw->chip_ops->chip_edc_read(hw, mem, pos,
                                                          data, NULL);
                if (ret)
                        return ret;

                ofst = pos % sizeof(data);
                len = min(count, sizeof(data) - ofst);
                if (copy_to_user(buf, (u8 *)data + ofst, len))
                        return -EFAULT;

                buf += len;
                pos += len;
                count -= len;
        }
        count = pos - *ppos;
        *ppos = pos;
        return count;
}

static const struct file_operations csio_mem_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = simple_open,
        .read    = csio_mem_read,
        .llseek  = default_llseek,
};

void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
                                 unsigned int idx, unsigned int size_mb)
{
        struct dentry *de;

        de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
                                 (void *)hw + idx, &csio_mem_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = size_mb << 20;
}
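
/*
 * Note on the debugfs private_data encoding above: csio_add_debugfs_mem()
 * tags the memory index (MEM_EDC0, MEM_EDC1 or MEM_MC) into the low two
 * bits of the hw pointer, and csio_mem_read() recovers both halves:
 *
 *      mem = (uintptr_t)file->private_data & 3;   -- memory index
 *      hw  = file->private_data - mem;            -- untagged hw pointer
 *
 * This relies on the csio_hw structure coming from kzalloc(), whose
 * alignment guarantees leave the two low pointer bits free.
 */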

static int csio_setup_debugfs(struct csio_hw *hw)
{
        int i;

        if (IS_ERR_OR_NULL(hw->debugfs_root))
                return -1;

        i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
        if (i & EDRAM0_ENABLE_F)
                csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
        if (i & EDRAM1_ENABLE_F)
                csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);

        hw->chip_ops->chip_dfs_create_ext_mem(hw);
        return 0;
}

/*
 * csio_dfs_create - Creates and sets up per-hw debugfs.
 *
 */
static int
csio_dfs_create(struct csio_hw *hw)
{
        if (csio_debugfs_root) {
                hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
                                                        csio_debugfs_root);
                csio_setup_debugfs(hw);
        }

        return 0;
}

/*
 * csio_dfs_destroy - Destroys per-hw debugfs.
 */
static int
csio_dfs_destroy(struct csio_hw *hw)
{
        if (hw->debugfs_root)
                debugfs_remove_recursive(hw->debugfs_root);

        return 0;
}

/*
 * csio_dfs_init - Debug filesystem initialization for the module.
 *
 */
static int
csio_dfs_init(void)
{
        csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
        if (!csio_debugfs_root)
                pr_warn("Could not create debugfs entry, continuing\n");

        return 0;
}

/*
 * csio_dfs_exit - debugfs cleanup for the module.
 */
static void
csio_dfs_exit(void)
{
        debugfs_remove(csio_debugfs_root);
}

/*
 * csio_pci_init - PCI initialization.
 * @pdev: PCI device.
 * @bars: Bitmask of bars to be requested.
 *
 * Initializes the PCI function by enabling MMIO, setting bus
 * mastership and setting DMA mask.
 */
static int
csio_pci_init(struct pci_dev *pdev, int *bars)
{
        int rv = -ENODEV;

        *bars = pci_select_bars(pdev, IORESOURCE_MEM);

        if (pci_enable_device_mem(pdev))
                goto err;

        if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
                goto err_disable_device;

        pci_set_master(pdev);
        pci_try_set_mwi(pdev);

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        } else {
                dev_err(&pdev->dev, "No suitable DMA available.\n");
                goto err_release_regions;
        }

        return 0;

err_release_regions:
        pci_release_selected_regions(pdev, *bars);
err_disable_device:
        pci_disable_device(pdev);
err:
        return rv;
}
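
/*
 * DMA note for csio_pci_init() above: a 64-bit DMA mask is attempted
 * first and 32-bit is used only as a fallback, with the coherent
 * (consistent) mask kept in step with the streaming mask in both cases.
 * If neither mask is accepted the device cannot DMA on this platform
 * and the probe is aborted.
 */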

/*
 * csio_pci_exit - PCI uninitialization.
 * @pdev: PCI device.
 * @bars: Bars to be released.
 *
 */
static void
csio_pci_exit(struct pci_dev *pdev, int *bars)
{
        pci_release_selected_regions(pdev, *bars);
        pci_disable_device(pdev);
}

/*
 * csio_hw_init_workers - Initialize the HW module's worker threads.
 * @hw: HW module.
 *
 */
static void
csio_hw_init_workers(struct csio_hw *hw)
{
        INIT_WORK(&hw->evtq_work, csio_evtq_worker);
}

static void
csio_hw_exit_workers(struct csio_hw *hw)
{
        cancel_work_sync(&hw->evtq_work);
        flush_scheduled_work();
}

static int
csio_create_queues(struct csio_hw *hw)
{
        int i, j;
        struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
        int rv;
        struct csio_scsi_cpu_info *info;

        if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
                return 0;

        if (hw->intr_mode != CSIO_IM_MSIX) {
                rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
                                        0, hw->pport[0].portid, false, NULL);
                if (rv != 0) {
                        csio_err(hw, "Forward Interrupt IQ failed!: %d\n", rv);
                        return rv;
                }
        }

        /* FW event queue */
        rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
                               csio_get_fwevt_intr_idx(hw),
                               hw->pport[0].portid, true, NULL);
        if (rv != 0) {
                csio_err(hw, "FW event IQ config failed!: %d\n", rv);
                return rv;
        }

        /* Create mgmt queue */
        rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
                        mgmtm->iq_idx, hw->pport[0].portid, NULL);

        if (rv != 0) {
                csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
                goto err;
        }

        /* Create SCSI queues */
        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];

                for (j = 0; j < info->max_cpus; j++) {
                        struct csio_scsi_qset *sqset = &hw->sqset[i][j];

                        rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
                                               sqset->intr_idx, i, false, NULL);
                        if (rv != 0) {
                                csio_err(hw,
                                   "SCSI module IQ config failed [%d][%d]:%d\n",
                                   i, j, rv);
                                goto err;
                        }
                        rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
                                               sqset->iq_idx, i, NULL);
                        if (rv != 0) {
                                csio_err(hw,
                                   "SCSI module EQ config failed [%d][%d]:%d\n",
                                   i, j, rv);
                                goto err;
                        }
                } /* for all CPUs */
        } /* For all ports */

        hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
        return 0;
err:
        csio_wr_destroy_queues(hw, true);
        return -EINVAL;
}
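
/*
 * Queue bring-up order in csio_create_queues() above: the forward
 * interrupt IQ (non-MSIX modes only) and the FW event IQ come first,
 * then the mgmt EQ, and finally one IQ/EQ pair per (port, CPU) qset.
 * A failure while creating the mgmt EQ or the SCSI queues unwinds
 * everything already registered via csio_wr_destroy_queues().
 */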

/*
 * csio_config_queues - Configure the DMA queues.
 * @hw: HW module.
 *
 * Allocates memory for queues and registers them with FW.
 */
int
csio_config_queues(struct csio_hw *hw)
{
        int i, j, idx, k = 0;
        int rv;
        struct csio_scsi_qset *sqset;
        struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
        struct csio_scsi_qset *orig;
        struct csio_scsi_cpu_info *info;

        if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
                return csio_create_queues(hw);

        /* Calculate number of SCSI queues for MSIX we would like */
        hw->num_scsi_msix_cpus = num_online_cpus();
        hw->num_sqsets = num_online_cpus() * hw->num_pports;

        if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
                hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
                hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
        }

        /* Initialize max_cpus, may get reduced during msix allocations */
        for (i = 0; i < hw->num_pports; i++)
                hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;

        csio_dbg(hw, "nsqsets:%d scpus:%d\n",
                    hw->num_sqsets, hw->num_scsi_msix_cpus);

        csio_intr_enable(hw);

        if (hw->intr_mode != CSIO_IM_MSIX) {

                /* Allocate Forward interrupt iq. */
                hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
                                                CSIO_INTR_WRSIZE, CSIO_INGRESS,
                                                (void *)hw, 0, 0, NULL);
                if (hw->intr_iq_idx == -1) {
                        csio_err(hw,
                                 "Forward interrupt queue creation failed\n");
                        goto intr_disable;
                }
        }

        /* Allocate the FW evt queue */
        hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
                                           CSIO_FWEVT_WRSIZE,
                                           CSIO_INGRESS, (void *)hw,
                                           CSIO_FWEVT_FLBUFS, 0,
                                           csio_fwevt_intx_handler);
        if (hw->fwevt_iq_idx == -1) {
                csio_err(hw, "FW evt queue creation failed\n");
                goto intr_disable;
        }

        /* Allocate the mgmt queue */
        mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
                                      CSIO_MGMT_EQ_WRSIZE,
                                      CSIO_EGRESS, (void *)hw, 0, 0, NULL);
        if (mgmtm->eq_idx == -1) {
                csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
                goto intr_disable;
        }

        /* Use FW IQ for MGMT req completion */
        mgmtm->iq_idx = hw->fwevt_iq_idx;

        /* Allocate SCSI queues */
        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];

                for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
                        sqset = &hw->sqset[i][j];

                        if (j >= info->max_cpus) {
                                k = j % info->max_cpus;
                                orig = &hw->sqset[i][k];
                                sqset->eq_idx = orig->eq_idx;
                                sqset->iq_idx = orig->iq_idx;
                                continue;
                        }

                        idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
                                              CSIO_EGRESS, (void *)hw, 0, 0,
                                              NULL);
                        if (idx == -1) {
                                csio_err(hw, "EQ creation failed for idx:%d\n",
                                            idx);
                                goto intr_disable;
                        }

                        sqset->eq_idx = idx;

                        idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
                                             CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
                                             (void *)hw, 0, 0,
                                             csio_scsi_intx_handler);
                        if (idx == -1) {
                                csio_err(hw, "IQ creation failed for idx:%d\n",
                                            idx);
                                goto intr_disable;
                        }
                        sqset->iq_idx = idx;
                } /* for all CPUs */
        } /* For all ports */

        hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;

        rv = csio_create_queues(hw);
        if (rv != 0)
                goto intr_disable;

        /*
         * Now request IRQs for the vectors. In the event of a failure,
         * cleanup is handled internally by this function.
         */
        rv = csio_request_irqs(hw);
        if (rv != 0)
                return -EINVAL;

        return 0;

intr_disable:
        csio_intr_disable(hw, false);

        return -EINVAL;
}
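
/*
 * If MSI-X vector allocation reduced a port's max_cpus below the number
 * of online CPUs, csio_config_queues() above does not allocate fresh
 * queues for the excess qsets; it wraps them onto already-allocated
 * ones:
 *
 *      k = j % info->max_cpus;
 *      sqset->eq_idx = hw->sqset[i][k].eq_idx;
 *      sqset->iq_idx = hw->sqset[i][k].iq_idx;
 *
 * so several CPUs may end up sharing one hardware queue pair per port.
 */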

static int
csio_resource_alloc(struct csio_hw *hw)
{
        struct csio_wrm *wrm = csio_hw_to_wrm(hw);
        int rv = -ENOMEM;

        wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
                       CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);

        hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
                                                  sizeof(struct csio_mb));
        if (!hw->mb_mempool)
                goto err;

        hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
                                                     sizeof(struct csio_rnode));
        if (!hw->rnode_mempool)
                goto err_free_mb_mempool;

        hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
                                            CSIO_SCSI_RSP_LEN, 8, 0);
        if (!hw->scsi_pci_pool)
                goto err_free_rn_pool;

        return 0;

err_free_rn_pool:
        mempool_destroy(hw->rnode_mempool);
        hw->rnode_mempool = NULL;
err_free_mb_mempool:
        mempool_destroy(hw->mb_mempool);
        hw->mb_mempool = NULL;
err:
        return rv;
}

static void
csio_resource_free(struct csio_hw *hw)
{
        pci_pool_destroy(hw->scsi_pci_pool);
        hw->scsi_pci_pool = NULL;
        mempool_destroy(hw->rnode_mempool);
        hw->rnode_mempool = NULL;
        mempool_destroy(hw->mb_mempool);
        hw->mb_mempool = NULL;
}
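
/*
 * Sizing note: mempool_create_kmalloc_pool() keeps a reserve of
 * CSIO_MIN_MEMPOOL_SZ (64) pre-allocated elements per pool, so at least
 * that many mailbox/rnode allocations can succeed even under memory
 * pressure; anything beyond the reserve falls through to kmalloc().
 */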

/*
 * csio_hw_alloc - Allocate and initialize the HW module.
 * @pdev: PCI device.
 *
 * Allocates HW structure, DMA, memory resources, maps BARS to
 * host memory and initializes HW module.
 */
static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
{
        struct csio_hw *hw;

        hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
        if (!hw)
                goto err;

        hw->pdev = pdev;
        strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);

        /* memory pool/DMA pool allocation */
        if (csio_resource_alloc(hw))
                goto err_free_hw;

        /* Get the start address of registers from BAR 0 */
        hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
                                       pci_resource_len(pdev, 0));
        if (!hw->regstart) {
                csio_err(hw, "Could not map BAR 0, regstart = %p\n",
                         hw->regstart);
                goto err_resource_free;
        }

        csio_hw_init_workers(hw);

        if (csio_hw_init(hw))
                goto err_unmap_bar;

        csio_dfs_create(hw);

        csio_dbg(hw, "hw:%p\n", hw);

        return hw;

err_unmap_bar:
        csio_hw_exit_workers(hw);
        iounmap(hw->regstart);
err_resource_free:
        csio_resource_free(hw);
err_free_hw:
        kfree(hw);
err:
        return NULL;
}

/*
 * csio_hw_free - Uninitialize and free the HW module.
 * @hw: The HW module
 *
 * Disable interrupts, uninit the HW module, free resources, free hw.
 */
static void
csio_hw_free(struct csio_hw *hw)
{
        csio_intr_disable(hw, true);
        csio_hw_exit_workers(hw);
        csio_hw_exit(hw);
        iounmap(hw->regstart);
        csio_dfs_destroy(hw);
        csio_resource_free(hw);
        kfree(hw);
}
/**
 * csio_shost_init - Create and initialize the lnode module.
 * @hw:         The HW module.
 * @dev:        The device associated with this invocation.
 * @probe:      Called from probe context or not?
 * @pln:        Parent lnode if any.
 *
 * Allocates lnode structure via scsi_host_alloc, initializes
 * shost, initializes lnode module and registers with SCSI ML
 * via scsi_host_add. This function is shared between physical and
 * virtual node ports.
 */
struct csio_lnode *
csio_shost_init(struct csio_hw *hw, struct device *dev,
                  bool probe, struct csio_lnode *pln)
{
        struct Scsi_Host  *shost = NULL;
        struct csio_lnode *ln;

        csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
        csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;

        /*
         * hw->pdev is the physical port's PCI dev structure,
         * which will be different from the NPIV dev structure.
         */
        if (dev == &hw->pdev->dev)
                shost = scsi_host_alloc(
                                &csio_fcoe_shost_template,
                                sizeof(struct csio_lnode));
        else
                shost = scsi_host_alloc(
                                &csio_fcoe_shost_vport_template,
                                sizeof(struct csio_lnode));

        if (!shost)
                goto err;

        ln = shost_priv(shost);
        memset(ln, 0, sizeof(struct csio_lnode));

        /* Link common lnode to this lnode */
        ln->dev_num = (shost->host_no << 16);

        shost->can_queue = CSIO_MAX_QUEUE;
        shost->this_id = -1;
        shost->unique_id = shost->host_no;
        shost->max_cmd_len = 16; /* Max CDB length supported */
        shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
                              hw->fres_info.max_ssns);
        shost->max_lun = CSIO_MAX_LUN;
        if (dev == &hw->pdev->dev)
                shost->transportt = csio_fcoe_transport;
        else
                shost->transportt = csio_fcoe_transport_vport;

        /* root lnode */
        if (!hw->rln)
                hw->rln = ln;

        /* Other initialization here: Common, Transport specific */
        if (csio_lnode_init(ln, hw, pln))
                goto err_shost_put;

        if (scsi_add_host(shost, dev))
                goto err_lnode_exit;

        return ln;

err_lnode_exit:
        csio_lnode_exit(ln);
err_shost_put:
        scsi_host_put(shost);
err:
        return NULL;
}
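
/*
 * Template selection in csio_shost_init() above: comparing 'dev'
 * against &hw->pdev->dev distinguishes the physical port (probed
 * directly off the PCI device) from an NPIV vport (invoked with the
 * vport's device), and picks the matching shost template and FC
 * transport template accordingly.
 */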

/**
 * csio_shost_exit - De-instantiate the shost.
 * @ln:         The lnode module corresponding to the shost.
 *
 */
void
csio_shost_exit(struct csio_lnode *ln)
{
        struct Scsi_Host *shost = csio_ln_to_shost(ln);
        struct csio_hw *hw = csio_lnode_to_hw(ln);

        /* Inform transport */
        fc_remove_host(shost);

        /* Inform SCSI ML */
        scsi_remove_host(shost);

        /* Flush all the events, so that any rnode removal events
         * already queued are all handled, before we remove the lnode.
         */
        spin_lock_irq(&hw->lock);
        csio_evtq_flush(hw);
        spin_unlock_irq(&hw->lock);

        csio_lnode_exit(ln);
        scsi_host_put(shost);
}

struct csio_lnode *
csio_lnode_alloc(struct csio_hw *hw)
{
        return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
}

void
csio_lnodes_block_request(struct csio_hw *hw)
{
        struct Scsi_Host  *shost;
        struct csio_lnode *sln;
        struct csio_lnode *ln;
        struct list_head *cur_ln, *cur_cln;
        struct csio_lnode **lnode_list;
        int cur_cnt = 0, ii;

        lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
                        GFP_KERNEL);
        if (!lnode_list) {
                csio_err(hw, "Failed to allocate lnodes_list");
                return;
        }

        spin_lock_irq(&hw->lock);
        /* Traverse sibling lnodes */
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;
                lnode_list[cur_cnt++] = sln;

                /* Traverse children lnodes */
                list_for_each(cur_cln, &sln->cln_head)
                        lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
        }
        spin_unlock_irq(&hw->lock);

        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
                ln = lnode_list[ii];
                shost = csio_ln_to_shost(ln);
                scsi_block_requests(shost);
        }
        kfree(lnode_list);
}
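
/*
 * Locking pattern shared by the lnode block/unblock helpers: the lnode
 * tree is snapshotted into lnode_list[] under hw->lock, and the lock is
 * dropped before calling into the SCSI midlayer, so hw->lock is never
 * held across scsi_block_requests()/scsi_unblock_requests().
 */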

void
csio_lnodes_unblock_request(struct csio_hw *hw)
{
        struct csio_lnode *ln;
        struct Scsi_Host  *shost;
        struct csio_lnode *sln;
        struct list_head *cur_ln, *cur_cln;
        struct csio_lnode **lnode_list;
        int cur_cnt = 0, ii;

        lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
                        GFP_KERNEL);
        if (!lnode_list) {
                csio_err(hw, "Failed to allocate lnodes_list");
                return;
        }

        spin_lock_irq(&hw->lock);
        /* Traverse sibling lnodes */
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;
                lnode_list[cur_cnt++] = sln;

                /* Traverse children lnodes */
                list_for_each(cur_cln, &sln->cln_head)
                        lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
        }
        spin_unlock_irq(&hw->lock);

        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
                ln = lnode_list[ii];
                shost = csio_ln_to_shost(ln);
                scsi_unblock_requests(shost);
        }
        kfree(lnode_list);
}

void
csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
{
        struct csio_lnode *ln;
        struct Scsi_Host  *shost;
        struct csio_lnode *sln;
        struct list_head *cur_ln, *cur_cln;
        struct csio_lnode **lnode_list;
        int cur_cnt = 0, ii;

        lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
                        GFP_KERNEL);
        if (!lnode_list) {
                csio_err(hw, "Failed to allocate lnodes_list");
                return;
        }

        spin_lock_irq(&hw->lock);
        /* Traverse sibling lnodes */
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;
                if (sln->portid != portid)
                        continue;

                lnode_list[cur_cnt++] = sln;

                /* Traverse children lnodes */
                list_for_each(cur_cln, &sln->cln_head)
                        lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
        }
        spin_unlock_irq(&hw->lock);

        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
                ln = lnode_list[ii];
                shost = csio_ln_to_shost(ln);
                scsi_block_requests(shost);
        }
        kfree(lnode_list);
}

void
csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
{
        struct csio_lnode *ln;
        struct Scsi_Host  *shost;
        struct csio_lnode *sln;
        struct list_head *cur_ln, *cur_cln;
        struct csio_lnode **lnode_list;
        int cur_cnt = 0, ii;

        lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
                        GFP_KERNEL);
        if (!lnode_list) {
                csio_err(hw, "Failed to allocate lnodes_list");
                return;
        }

        spin_lock_irq(&hw->lock);
        /* Traverse sibling lnodes */
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;
                if (sln->portid != portid)
                        continue;
                lnode_list[cur_cnt++] = sln;

                /* Traverse children lnodes */
                list_for_each(cur_cln, &sln->cln_head)
                        lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
        }
        spin_unlock_irq(&hw->lock);

        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
                ln = lnode_list[ii];
                shost = csio_ln_to_shost(ln);
                scsi_unblock_requests(shost);
        }
        kfree(lnode_list);
}

void
csio_lnodes_exit(struct csio_hw *hw, bool npiv)
{
        struct csio_lnode *sln;
        struct csio_lnode *ln;
        struct list_head *cur_ln, *cur_cln;
        struct csio_lnode **lnode_list;
        int cur_cnt = 0, ii;

        lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
                        GFP_KERNEL);
        if (!lnode_list) {
                csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
                return;
        }

        /* Get all child lnodes(NPIV ports) */
        spin_lock_irq(&hw->lock);
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;

                /* Traverse children lnodes */
                list_for_each(cur_cln, &sln->cln_head)
                        lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
        }
        spin_unlock_irq(&hw->lock);

        /* Delete NPIV lnodes */
        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
                ln = lnode_list[ii];
                fc_vport_terminate(ln->fc_vport);
        }

        /* Delete only npiv lnodes */
        if (npiv)
                goto free_lnodes;

        cur_cnt = 0;
        /* Get all physical lnodes */
        spin_lock_irq(&hw->lock);
        /* Traverse sibling lnodes */
        list_for_each(cur_ln, &hw->sln_head) {
                sln = (struct csio_lnode *) cur_ln;
                lnode_list[cur_cnt++] = sln;
        }
        spin_unlock_irq(&hw->lock);

        /* Delete physical lnodes */
        for (ii = 0; ii < cur_cnt; ii++) {
                csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
                csio_shost_exit(lnode_list[ii]);
        }

free_lnodes:
        kfree(lnode_list);
}
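
/*
 * Teardown order in csio_lnodes_exit() above: NPIV child lnodes are
 * removed first via fc_vport_terminate(), and only when 'npiv' is false
 * are the physical lnodes then torn down through csio_shost_exit(), so
 * no vport outlives its parent shost.
 */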

/*
 * csio_lnode_init_post - Set lnode attributes after starting HW.
 * @ln: lnode.
 *
 */
static void
csio_lnode_init_post(struct csio_lnode *ln)
{
        struct Scsi_Host  *shost = csio_ln_to_shost(ln);

        csio_fchost_attr_init(ln);

        scsi_scan_host(shost);
}

/*
 * csio_probe_one - Instantiate this function.
 * @pdev: PCI device
 * @id: Device ID
 *
 * This is the .probe() callback of the driver. This function:
 * - Initializes the PCI function by enabling MMIO, setting bus
 *   mastership and setting DMA mask.
 * - Allocates HW structure, DMA, memory resources, maps BARS to
 *   host memory and initializes HW module.
 * - Allocates lnode structure via scsi_host_alloc, initializes
 *   shost, initializes lnode module and registers with SCSI ML
 *   via scsi_host_add.
 * - Enables interrupts, and starts the chip by kicking off the
 *   HW state machine.
 * - Once hardware is ready, initiates a scan of the host via
 *   scsi_scan_host.
 */
static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int rv;
        int bars;
        int i;
        struct csio_hw *hw;
        struct csio_lnode *ln;

        /* probe only T5 and T6 cards */
        if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)) &&
            !csio_is_t6((pdev->device & CSIO_HW_CHIP_MASK)))
                return -ENODEV;

        rv = csio_pci_init(pdev, &bars);
        if (rv)
                goto err;

        hw = csio_hw_alloc(pdev);
        if (!hw) {
                rv = -ENODEV;
                goto err_pci_exit;
        }

        if (!pcie_relaxed_ordering_enabled(pdev))
                hw->flags |= CSIO_HWF_ROOT_NO_RELAXED_ORDERING;

        pci_set_drvdata(pdev, hw);

        rv = csio_hw_start(hw);
        if (rv) {
                if (rv == -EINVAL) {
                        dev_err(&pdev->dev,
                                "Failed to start FW, continuing in debug mode.\n");
                        return 0;
                }
                goto err_lnode_exit;
        }

        sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
                    FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
                    FW_HDR_FW_VER_MINOR_G(hw->fwrev),
                    FW_HDR_FW_VER_MICRO_G(hw->fwrev),
                    FW_HDR_FW_VER_BUILD_G(hw->fwrev));

        for (i = 0; i < hw->num_pports; i++) {
                ln = csio_shost_init(hw, &pdev->dev, true, NULL);
                if (!ln) {
                        rv = -ENODEV;
                        break;
                }
                /* Initialize portid */
                ln->portid = hw->pport[i].portid;

                spin_lock_irq(&hw->lock);
                if (csio_lnode_start(ln) != 0)
                        rv = -ENODEV;
                spin_unlock_irq(&hw->lock);

                if (rv)
                        break;

                csio_lnode_init_post(ln);
        }

        if (rv)
                goto err_lnode_exit;

        return 0;

err_lnode_exit:
        csio_lnodes_block_request(hw);
        spin_lock_irq(&hw->lock);
        csio_hw_stop(hw);
        spin_unlock_irq(&hw->lock);
        csio_lnodes_unblock_request(hw);
        pci_set_drvdata(hw->pdev, NULL);
        csio_lnodes_exit(hw, 0);
        csio_hw_free(hw);
err_pci_exit:
        csio_pci_exit(pdev, &bars);
err:
        dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
        return rv;
}

/*
 * csio_remove_one - Remove one instance of the driver at this PCI function.
 * @pdev: PCI device
 *
 * Used during hotplug operation.
 */
static void csio_remove_one(struct pci_dev *pdev)
{
        struct csio_hw *hw = pci_get_drvdata(pdev);
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);

        csio_lnodes_block_request(hw);
        spin_lock_irq(&hw->lock);

        /* Stops lnode, Rnode s/m
         * Quiesce IOs.
         * All sessions with remote ports are unregistered.
         */
        csio_hw_stop(hw);
        spin_unlock_irq(&hw->lock);
        csio_lnodes_unblock_request(hw);

        csio_lnodes_exit(hw, 0);
        csio_hw_free(hw);
        pci_set_drvdata(pdev, NULL);
        csio_pci_exit(pdev, &bars);
}

/*
 * csio_pci_error_detected - PCI error was detected
 * @pdev: PCI device
 *
 */
static pci_ers_result_t
csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
        struct csio_hw *hw = pci_get_drvdata(pdev);

        csio_lnodes_block_request(hw);
        spin_lock_irq(&hw->lock);

        /* Post PCI error detected evt to HW s/m
         * HW s/m handles this evt by quiescing IOs, unregisters rports
         * and finally takes the device to offline.
         */
        csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
        spin_unlock_irq(&hw->lock);
        csio_lnodes_unblock_request(hw);
        csio_lnodes_exit(hw, 0);
        csio_intr_disable(hw, true);
        pci_disable_device(pdev);
        return state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

/*
 * csio_pci_slot_reset - PCI slot has been reset.
 * @pdev: PCI device
 *
 */
static pci_ers_result_t
csio_pci_slot_reset(struct pci_dev *pdev)
{
        struct csio_hw *hw = pci_get_drvdata(pdev);
        int ready;

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);
        pci_cleanup_aer_uncorrect_error_status(pdev);

        /* Bring HW s/m to ready state,
         * but don't resume IOs.
         */
        spin_lock_irq(&hw->lock);
        csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
        ready = csio_is_hw_ready(hw);
        spin_unlock_irq(&hw->lock);

        if (ready) {
                return PCI_ERS_RESULT_RECOVERED;
        } else {
                dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
}
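
/*
 * Recovery note: pci_restore_state() invalidates the saved config-space
 * copy, so pci_save_state() is called immediately afterwards to re-arm
 * it for any subsequent reset. Only the HW state machine is brought
 * back to the ready state here; IO is resumed later, in
 * csio_pci_resume().
 */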

/*
 * csio_pci_resume - Resume normal operations
 * @pdev: PCI device
 *
 */
static void
csio_pci_resume(struct pci_dev *pdev)
{
        struct csio_hw *hw = pci_get_drvdata(pdev);
        struct csio_lnode *ln;
        int rv = 0;
        int i;

        /* Bring the LINK UP and Resume IO */

        for (i = 0; i < hw->num_pports; i++) {
                ln = csio_shost_init(hw, &pdev->dev, true, NULL);
                if (!ln) {
                        rv = -ENODEV;
                        break;
                }
                /* Initialize portid */
                ln->portid = hw->pport[i].portid;

                spin_lock_irq(&hw->lock);
                if (csio_lnode_start(ln) != 0)
                        rv = -ENODEV;
                spin_unlock_irq(&hw->lock);

                if (rv)
                        break;

                csio_lnode_init_post(ln);
        }

        if (rv)
                goto err_resume_exit;

        return;

err_resume_exit:
        csio_lnodes_block_request(hw);
        spin_lock_irq(&hw->lock);
        csio_hw_stop(hw);
        spin_unlock_irq(&hw->lock);
        csio_lnodes_unblock_request(hw);
        csio_lnodes_exit(hw, 0);
        csio_hw_free(hw);
        dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}

static struct pci_error_handlers csio_err_handler = {
        .error_detected = csio_pci_error_detected,
        .slot_reset     = csio_pci_slot_reset,
        .resume         = csio_pci_resume,
};

/*
 *  Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct pci_device_id csio_pci_tbl[] = {
/* FCoE uses PF6 */
#define CH_PCI_DEVICE_ID_FUNCTION       0x6

#define CH_PCI_ID_TABLE_ENTRY(devid) \
                { PCI_VDEVICE(CHELSIO, (devid)), 0 }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }

#include "t4_pci_id_tbl.h"
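
/*
 * The PCI ID table is generated X-macro style: t4_pci_id_tbl.h expands
 * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN/_END and one CH_PCI_ID_TABLE_ENTRY
 * per device into the csio_pci_tbl[] definition above, so the device
 * list stays shared with the other Chelsio drivers that include the
 * same header. CH_PCI_DEVICE_ID_FUNCTION selects the PCI physical
 * function (PF6, the FCoE function) that the entries match.
 */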

static struct pci_driver csio_pci_driver = {
        .name           = KBUILD_MODNAME,
        .driver         = {
                .owner  = THIS_MODULE,
        },
        .id_table       = csio_pci_tbl,
        .probe          = csio_probe_one,
        .remove         = csio_remove_one,
        .err_handler    = &csio_err_handler,
};

/*
 * csio_init - Chelsio storage driver initialization function.
 *
 */
static int __init
csio_init(void)
{
        int rv = -ENOMEM;

        pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);

        csio_dfs_init();

        csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
        if (!csio_fcoe_transport)
                goto err;

        csio_fcoe_transport_vport =
                        fc_attach_transport(&csio_fc_transport_vport_funcs);
        if (!csio_fcoe_transport_vport)
                goto err_vport;

        rv = pci_register_driver(&csio_pci_driver);
        if (rv)
                goto err_pci;

        return 0;

err_pci:
        fc_release_transport(csio_fcoe_transport_vport);
err_vport:
        fc_release_transport(csio_fcoe_transport);
err:
        csio_dfs_exit();
        return rv;
}

/*
 * csio_exit - Chelsio storage driver uninitialization.
 *
 * Function that gets called in the unload path.
 */
static void __exit
csio_exit(void)
{
        pci_unregister_driver(&csio_pci_driver);
        csio_dfs_exit();
        fc_release_transport(csio_fcoe_transport_vport);
        fc_release_transport(csio_fcoe_transport);
}

module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
MODULE_FIRMWARE(FW_FNAME_T6);
MODULE_SOFTDEP("pre: cxgb4");