linux/drivers/scsi/qla2xxx/qla_os.c
   1/*
   2 * QLogic Fibre Channel HBA Driver
   3 * Copyright (c) 2003-2011 QLogic Corporation
   4 *
   5 * See LICENSE.qla2xxx for copyright and licensing details.
   6 */
   7#include "qla_def.h"
   8
   9#include <linux/moduleparam.h>
  10#include <linux/vmalloc.h>
  11#include <linux/delay.h>
  12#include <linux/kthread.h>
  13#include <linux/mutex.h>
  14#include <linux/kobject.h>
  15#include <linux/slab.h>
  16
  17#include <scsi/scsi_tcq.h>
  18#include <scsi/scsicam.h>
  19#include <scsi/scsi_transport.h>
  20#include <scsi/scsi_transport_fc.h>
  21
  22/*
  23 * Driver version
  24 */
  25char qla2x00_version_str[40];
  26
  27static int apidev_major;
  28
  29/*
  30 * SRB allocation cache
  31 */
  32static struct kmem_cache *srb_cachep;
  33
  34/*
  35 * CT6 CTX allocation cache
  36 */
  37static struct kmem_cache *ctx_cachep;
  38
  39int ql2xlogintimeout = 20;
  40module_param(ql2xlogintimeout, int, S_IRUGO);
  41MODULE_PARM_DESC(ql2xlogintimeout,
  42                "Login timeout value in seconds.");
  43
  44int qlport_down_retry;
  45module_param(qlport_down_retry, int, S_IRUGO);
  46MODULE_PARM_DESC(qlport_down_retry,
  47                "Maximum number of command retries to a port that returns "
  48                "a PORT-DOWN status.");
  49
  50int ql2xplogiabsentdevice;
  51module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
  52MODULE_PARM_DESC(ql2xplogiabsentdevice,
  53                "Option to enable PLOGI to devices that are not present after "
  54                "a Fabric scan.  This is needed for several broken switches. "
   55                "Default is 0 - no PLOGI. 1 - perform PLOGI.");
  56
  57int ql2xloginretrycount = 0;
  58module_param(ql2xloginretrycount, int, S_IRUGO);
  59MODULE_PARM_DESC(ql2xloginretrycount,
  60                "Specify an alternate value for the NVRAM login retry count.");
  61
  62int ql2xallocfwdump = 1;
  63module_param(ql2xallocfwdump, int, S_IRUGO);
  64MODULE_PARM_DESC(ql2xallocfwdump,
  65                "Option to enable allocation of memory for a firmware dump "
  66                "during HBA initialization.  Memory allocation requirements "
  67                "vary by ISP type.  Default is 1 - allocate memory.");
  68
  69int ql2xextended_error_logging;
  70module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
  71MODULE_PARM_DESC(ql2xextended_error_logging,
   72                "Option to enable extended error logging. "
  73                "Default is 0 - no logging. 1 - log errors.");
  74
  75int ql2xshiftctondsd = 6;
  76module_param(ql2xshiftctondsd, int, S_IRUGO);
  77MODULE_PARM_DESC(ql2xshiftctondsd,
  78                "Set to control shifting of command type processing "
  79                "based on total number of SG elements.");
  80
  81static void qla2x00_free_device(scsi_qla_host_t *);
  82
   83int ql2xfdmienable = 1;
  84module_param(ql2xfdmienable, int, S_IRUGO);
  85MODULE_PARM_DESC(ql2xfdmienable,
  86                "Enables FDMI registrations. "
  87                "0 - no FDMI. Default is 1 - perform FDMI.");
  88
  89#define MAX_Q_DEPTH    32
  90static int ql2xmaxqdepth = MAX_Q_DEPTH;
  91module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
  92MODULE_PARM_DESC(ql2xmaxqdepth,
  93                "Maximum queue depth to report for target devices.");
  94
  95/* Do not change the value of this after module load */
  96int ql2xenabledif = 1;
  97module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
  98MODULE_PARM_DESC(ql2xenabledif,
   99                "Enable T10-CRC-DIF. "
  100                "Default is 1 - DIF support enabled. 0 - disable it.");
 101
 102int ql2xenablehba_err_chk;
 103module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
 104MODULE_PARM_DESC(ql2xenablehba_err_chk,
  105                "Enable T10-CRC-DIF error isolation by the HBA. "
  106                "Default is 0 - error isolation disabled. 1 - enable it.");
 107
  108int ql2xiidmaenable = 1;
 109module_param(ql2xiidmaenable, int, S_IRUGO);
 110MODULE_PARM_DESC(ql2xiidmaenable,
  111                "Enables iIDMA settings. "
 112                "Default is 1 - perform iIDMA. 0 - no iIDMA.");
 113
 114int ql2xmaxqueues = 1;
 115module_param(ql2xmaxqueues, int, S_IRUGO);
 116MODULE_PARM_DESC(ql2xmaxqueues,
  117                "Enables MQ settings. "
 118                "Default is 1 for single queue. Set it to number "
 119                "of queues in MQ mode.");
 120
 121int ql2xmultique_tag;
 122module_param(ql2xmultique_tag, int, S_IRUGO);
 123MODULE_PARM_DESC(ql2xmultique_tag,
  124                "Enables CPU affinity settings for the driver. "
  125                "Default is 0 for no affinity of request and response IO. "
  126                "Set it to 1 to turn on the CPU affinity.");
 127
 128int ql2xfwloadbin;
 129module_param(ql2xfwloadbin, int, S_IRUGO);
 130MODULE_PARM_DESC(ql2xfwloadbin,
 131                "Option to specify location from which to load ISP firmware:\n"
 132                " 2 -- load firmware via the request_firmware() (hotplug)\n"
 133                "      interface.\n"
 134                " 1 -- load firmware from flash.\n"
 135                " 0 -- use default semantics.\n");
 136
 137int ql2xetsenable;
 138module_param(ql2xetsenable, int, S_IRUGO);
 139MODULE_PARM_DESC(ql2xetsenable,
  140                "Enables firmware ETS burst. "
 141                "Default is 0 - skip ETS enablement.");
 142
 143int ql2xdbwr = 1;
 144module_param(ql2xdbwr, int, S_IRUGO);
 145MODULE_PARM_DESC(ql2xdbwr,
 146        "Option to specify scheme for request queue posting\n"
 147        " 0 -- Regular doorbell.\n"
 148        " 1 -- CAMRAM doorbell (faster).\n");
 149
 150int ql2xtargetreset = 1;
 151module_param(ql2xtargetreset, int, S_IRUGO);
 152MODULE_PARM_DESC(ql2xtargetreset,
  153                 "Enable target reset. "
 154                 "Default is 1 - use hw defaults.");
 155
 156int ql2xgffidenable;
 157module_param(ql2xgffidenable, int, S_IRUGO);
 158MODULE_PARM_DESC(ql2xgffidenable,
 159                "Enables GFF_ID checks of port type. "
 160                "Default is 0 - Do not use GFF_ID information.");
 161
 162int ql2xasynctmfenable;
 163module_param(ql2xasynctmfenable, int, S_IRUGO);
 164MODULE_PARM_DESC(ql2xasynctmfenable,
  165                "Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
 166                "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
 167
 168int ql2xdontresethba;
 169module_param(ql2xdontresethba, int, S_IRUGO);
 170MODULE_PARM_DESC(ql2xdontresethba,
 171        "Option to specify reset behaviour\n"
 172        " 0 (Default) -- Reset on failure.\n"
 173        " 1 -- Do not reset on failure.\n");
 174
 175uint ql2xmaxlun = MAX_LUNS;
 176module_param(ql2xmaxlun, uint, S_IRUGO);
 177MODULE_PARM_DESC(ql2xmaxlun,
 178                "Defines the maximum LU number to register with the SCSI "
 179                "midlayer. Default is 65535.");
 180
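/*
 * Usage note (illustrative, not part of the original source): these
 * options are normally supplied at module load time, for example
 *
 *     modprobe qla2xxx ql2xextended_error_logging=1 ql2xmaxqdepth=64
 *
 * Parameters declared with S_IWUSR (e.g. ql2xextended_error_logging,
 * ql2xmaxqdepth) can also be changed at runtime through
 * /sys/module/qla2xxx/parameters/.
 */
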
 181/*
 182 * SCSI host template entry points
 183 */
 184static int qla2xxx_slave_configure(struct scsi_device * device);
 185static int qla2xxx_slave_alloc(struct scsi_device *);
 186static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
 187static void qla2xxx_scan_start(struct Scsi_Host *);
 188static void qla2xxx_slave_destroy(struct scsi_device *);
 189static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 190static int qla2xxx_eh_abort(struct scsi_cmnd *);
 191static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
 192static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
 193static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
 194static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 195
 196static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
 197static int qla2x00_change_queue_type(struct scsi_device *, int);
 198
 199struct scsi_host_template qla2xxx_driver_template = {
 200        .module                 = THIS_MODULE,
 201        .name                   = QLA2XXX_DRIVER_NAME,
 202        .queuecommand           = qla2xxx_queuecommand,
 203
 204        .eh_abort_handler       = qla2xxx_eh_abort,
 205        .eh_device_reset_handler = qla2xxx_eh_device_reset,
 206        .eh_target_reset_handler = qla2xxx_eh_target_reset,
 207        .eh_bus_reset_handler   = qla2xxx_eh_bus_reset,
 208        .eh_host_reset_handler  = qla2xxx_eh_host_reset,
 209
 210        .slave_configure        = qla2xxx_slave_configure,
 211
 212        .slave_alloc            = qla2xxx_slave_alloc,
 213        .slave_destroy          = qla2xxx_slave_destroy,
 214        .scan_finished          = qla2xxx_scan_finished,
 215        .scan_start             = qla2xxx_scan_start,
 216        .change_queue_depth     = qla2x00_change_queue_depth,
 217        .change_queue_type      = qla2x00_change_queue_type,
 218        .this_id                = -1,
 219        .cmd_per_lun            = 3,
 220        .use_clustering         = ENABLE_CLUSTERING,
 221        .sg_tablesize           = SG_ALL,
 222
 223        .max_sectors            = 0xFFFF,
 224        .shost_attrs            = qla2x00_host_attrs,
 225};
 226
 227static struct scsi_transport_template *qla2xxx_transport_template = NULL;
 228struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
 229
 230/* TODO Convert to inlines
 231 *
 232 * Timer routines
 233 */
 234
 235__inline__ void
 236qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
 237{
 238        init_timer(&vha->timer);
 239        vha->timer.expires = jiffies + interval * HZ;
 240        vha->timer.data = (unsigned long)vha;
 241        vha->timer.function = (void (*)(unsigned long))func;
 242        add_timer(&vha->timer);
 243        vha->timer_active = 1;
 244}
 245
 246static inline void
 247qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
 248{
 249        /* Currently used for 82XX only. */
 250        if (vha->device_flags & DFLG_DEV_FAILED)
 251                return;
 252
 253        mod_timer(&vha->timer, jiffies + interval * HZ);
 254}
 255
 256static __inline__ void
 257qla2x00_stop_timer(scsi_qla_host_t *vha)
 258{
 259        del_timer_sync(&vha->timer);
 260        vha->timer_active = 0;
 261}
 262
 263static int qla2x00_do_dpc(void *data);
 264
 265static void qla2x00_rst_aen(scsi_qla_host_t *);
 266
 267static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
 268        struct req_que **, struct rsp_que **);
 269static void qla2x00_free_fw_dump(struct qla_hw_data *);
 270static void qla2x00_mem_free(struct qla_hw_data *);
 271static void qla2x00_sp_free_dma(srb_t *);
 272
 273/* -------------------------------------------------------------------------- */
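/*
 * Allocate the request/response queue pointer arrays and reserve
 * queue id 0 for the base queues.  Returns 1 on success and -ENOMEM
 * on allocation failure.
 */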
 274static int qla2x00_alloc_queues(struct qla_hw_data *ha)
 275{
 276        ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
 277                                GFP_KERNEL);
 278        if (!ha->req_q_map) {
 279                qla_printk(KERN_WARNING, ha,
 280                        "Unable to allocate memory for request queue ptrs\n");
 281                goto fail_req_map;
 282        }
 283
 284        ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
 285                                GFP_KERNEL);
 286        if (!ha->rsp_q_map) {
 287                qla_printk(KERN_WARNING, ha,
 288                        "Unable to allocate memory for response queue ptrs\n");
 289                goto fail_rsp_map;
 290        }
 291        set_bit(0, ha->rsp_qid_map);
 292        set_bit(0, ha->req_qid_map);
 293        return 1;
 294
 295fail_rsp_map:
 296        kfree(ha->req_q_map);
 297        ha->req_q_map = NULL;
 298fail_req_map:
 299        return -ENOMEM;
 300}
 301
 302static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
 303{
 304        if (req && req->ring)
 305                dma_free_coherent(&ha->pdev->dev,
 306                (req->length + 1) * sizeof(request_t),
 307                req->ring, req->dma);
 308
 309        kfree(req);
 310        req = NULL;
 311}
 312
 313static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
 314{
 315        if (rsp && rsp->ring)
 316                dma_free_coherent(&ha->pdev->dev,
 317                (rsp->length + 1) * sizeof(response_t),
 318                rsp->ring, rsp->dma);
 319
 320        kfree(rsp);
 321        rsp = NULL;
 322}
 323
 324static void qla2x00_free_queues(struct qla_hw_data *ha)
 325{
 326        struct req_que *req;
 327        struct rsp_que *rsp;
 328        int cnt;
 329
 330        for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
 331                req = ha->req_q_map[cnt];
 332                qla2x00_free_req_que(ha, req);
 333        }
 334        kfree(ha->req_q_map);
 335        ha->req_q_map = NULL;
 336
 337        for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
 338                rsp = ha->rsp_q_map[cnt];
 339                qla2x00_free_rsp_que(ha, rsp);
 340        }
 341        kfree(ha->rsp_q_map);
 342        ha->rsp_q_map = NULL;
 343}
 344
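/*
 * Set up multi-queue (CPU affinity) mode on multi-queue capable
 * firmware when ql2xmultique_tag is enabled: create a dedicated
 * request queue plus the additional response queues.  Any failure
 * falls back to single-queue operation.
 */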
 345static int qla25xx_setup_mode(struct scsi_qla_host *vha)
 346{
 347        uint16_t options = 0;
 348        int ques, req, ret;
 349        struct qla_hw_data *ha = vha->hw;
 350
 351        if (!(ha->fw_attributes & BIT_6)) {
 352                qla_printk(KERN_INFO, ha,
 353                        "Firmware is not multi-queue capable\n");
 354                goto fail;
 355        }
 356        if (ql2xmultique_tag) {
 357                /* create a request queue for IO */
 358                options |= BIT_7;
 359                req = qla25xx_create_req_que(ha, options, 0, 0, -1,
 360                        QLA_DEFAULT_QUE_QOS);
 361                if (!req) {
 362                        qla_printk(KERN_WARNING, ha,
 363                                "Can't create request queue\n");
 364                        goto fail;
 365                }
 366                ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
 367                vha->req = ha->req_q_map[req];
 368                options |= BIT_1;
 369                for (ques = 1; ques < ha->max_rsp_queues; ques++) {
 370                        ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
 371                        if (!ret) {
 372                                qla_printk(KERN_WARNING, ha,
 373                                        "Response Queue create failed\n");
 374                                goto fail2;
 375                        }
 376                }
 377                ha->flags.cpu_affinity_enabled = 1;
 378
 379                DEBUG2(qla_printk(KERN_INFO, ha,
 380                        "CPU affinity mode enabled, no. of response"
 381                        " queues:%d, no. of request queues:%d\n",
 382                        ha->max_rsp_queues, ha->max_req_queues));
 383        }
 384        return 0;
 385fail2:
 386        qla25xx_delete_queues(vha);
 387        destroy_workqueue(ha->wq);
 388        ha->wq = NULL;
 389fail:
 390        ha->mqenable = 0;
 391        kfree(ha->req_q_map);
 392        kfree(ha->rsp_q_map);
 393        ha->max_req_queues = ha->max_rsp_queues = 1;
 394        return 1;
 395}
 396
 397static char *
 398qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
 399{
 400        struct qla_hw_data *ha = vha->hw;
 401        static char *pci_bus_modes[] = {
 402                "33", "66", "100", "133",
 403        };
 404        uint16_t pci_bus;
 405
 406        strcpy(str, "PCI");
 407        pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
 408        if (pci_bus) {
 409                strcat(str, "-X (");
 410                strcat(str, pci_bus_modes[pci_bus]);
 411        } else {
 412                pci_bus = (ha->pci_attr & BIT_8) >> 8;
 413                strcat(str, " (");
 414                strcat(str, pci_bus_modes[pci_bus]);
 415        }
 416        strcat(str, " MHz)");
 417
 418        return (str);
 419}
 420
 421static char *
 422qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
 423{
 424        static char *pci_bus_modes[] = { "33", "66", "100", "133", };
 425        struct qla_hw_data *ha = vha->hw;
 426        uint32_t pci_bus;
 427        int pcie_reg;
 428
 429        pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
 430        if (pcie_reg) {
 431                char lwstr[6];
 432                uint16_t pcie_lstat, lspeed, lwidth;
 433
 434                pcie_reg += 0x12;
 435                pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
 436                lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
 437                lwidth = (pcie_lstat &
 438                    (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
 439
 440                strcpy(str, "PCIe (");
 441                if (lspeed == 1)
 442                        strcat(str, "2.5GT/s ");
 443                else if (lspeed == 2)
 444                        strcat(str, "5.0GT/s ");
 445                else
 446                        strcat(str, "<unknown> ");
 447                snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
 448                strcat(str, lwstr);
 449
 450                return str;
 451        }
 452
 453        strcpy(str, "PCI");
 454        pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
 455        if (pci_bus == 0 || pci_bus == 8) {
 456                strcat(str, " (");
 457                strcat(str, pci_bus_modes[pci_bus >> 3]);
 458        } else {
 459                strcat(str, "-X ");
 460                if (pci_bus & BIT_2)
 461                        strcat(str, "Mode 2");
 462                else
 463                        strcat(str, "Mode 1");
 464                strcat(str, " (");
 465                strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
 466        }
 467        strcat(str, " MHz)");
 468
 469        return str;
 470}
 471
 472static char *
 473qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
 474{
 475        char un_str[10];
 476        struct qla_hw_data *ha = vha->hw;
 477
 478        sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
 479            ha->fw_minor_version,
 480            ha->fw_subminor_version);
 481
 482        if (ha->fw_attributes & BIT_9) {
 483                strcat(str, "FLX");
 484                return (str);
 485        }
 486
 487        switch (ha->fw_attributes & 0xFF) {
 488        case 0x7:
 489                strcat(str, "EF");
 490                break;
 491        case 0x17:
 492                strcat(str, "TP");
 493                break;
 494        case 0x37:
 495                strcat(str, "IP");
 496                break;
 497        case 0x77:
 498                strcat(str, "VI");
 499                break;
 500        default:
 501                sprintf(un_str, "(%x)", ha->fw_attributes);
 502                strcat(str, un_str);
 503                break;
 504        }
 505        if (ha->fw_attributes & 0x100)
 506                strcat(str, "X");
 507
 508        return (str);
 509}
 510
 511static char *
 512qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
 513{
 514        struct qla_hw_data *ha = vha->hw;
 515
 516        sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
 517            ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
 518        return str;
 519}
 520
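/*
 * Allocate an srb from the per-HBA mempool and initialize it for the
 * given fcport and SCSI command.  Returns NULL if the pool is
 * exhausted.
 */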
 521static inline srb_t *
 522qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
 523        struct scsi_cmnd *cmd)
 524{
 525        srb_t *sp;
 526        struct qla_hw_data *ha = vha->hw;
 527
 528        sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
 529        if (!sp)
 530                return sp;
 531
 532        atomic_set(&sp->ref_count, 1);
 533        sp->fcport = fcport;
 534        sp->cmd = cmd;
 535        sp->flags = 0;
 536        CMD_SP(cmd) = (void *)sp;
 537        sp->ctx = NULL;
 538
 539        return sp;
 540}
 541
 542static int
 543qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 544{
 545        scsi_qla_host_t *vha = shost_priv(host);
 546        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 547        struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
 548        struct qla_hw_data *ha = vha->hw;
 549        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 550        srb_t *sp;
 551        int rval;
 552
 553        if (ha->flags.eeh_busy) {
 554                if (ha->flags.pci_channel_io_perm_failure)
 555                        cmd->result = DID_NO_CONNECT << 16;
 556                else
 557                        cmd->result = DID_REQUEUE << 16;
 558                goto qc24_fail_command;
 559        }
 560
 561        rval = fc_remote_port_chkready(rport);
 562        if (rval) {
 563                cmd->result = rval;
 564                goto qc24_fail_command;
 565        }
 566
 567        if (!vha->flags.difdix_supported &&
 568                scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
 569                        DEBUG2(qla_printk(KERN_ERR, ha,
 570                            "DIF Cap Not Reg, fail DIF capable cmd's:%x\n",
 571                            cmd->cmnd[0]));
 572                        cmd->result = DID_NO_CONNECT << 16;
 573                        goto qc24_fail_command;
 574        }
 575        if (atomic_read(&fcport->state) != FCS_ONLINE) {
 576                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
 577                        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 578                        cmd->result = DID_NO_CONNECT << 16;
 579                        goto qc24_fail_command;
 580                }
 581                goto qc24_target_busy;
 582        }
 583
 584        sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
 585        if (!sp)
 586                goto qc24_host_busy;
 587
 588        rval = ha->isp_ops->start_scsi(sp);
 589        if (rval != QLA_SUCCESS)
 590                goto qc24_host_busy_free_sp;
 591
 592        return 0;
 593
 594qc24_host_busy_free_sp:
 595        qla2x00_sp_free_dma(sp);
 596        mempool_free(sp, ha->srb_mempool);
 597
 598qc24_host_busy:
 599        return SCSI_MLQUEUE_HOST_BUSY;
 600
 601qc24_target_busy:
 602        return SCSI_MLQUEUE_TARGET_BUSY;
 603
 604qc24_fail_command:
 605        cmd->scsi_done(cmd);
 606
 607        return 0;
 608}
 609
 610/*
 611 * qla2x00_eh_wait_on_command
  612 *    Waits for the command to be returned by the firmware, up to a
  613 *    maximum wait time.
 614 *
 615 * Input:
 616 *    cmd = Scsi Command to wait on.
 617 *
 618 * Return:
 619 *    Not Found : 0
 620 *    Found : 1
 621 */
 622static int
 623qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
 624{
 625#define ABORT_POLLING_PERIOD    1000
 626#define ABORT_WAIT_ITER         ((10 * 1000) / (ABORT_POLLING_PERIOD))
 627        unsigned long wait_iter = ABORT_WAIT_ITER;
 628        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 629        struct qla_hw_data *ha = vha->hw;
 630        int ret = QLA_SUCCESS;
 631
 632        if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
 633                DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
 634                return ret;
 635        }
 636
 637        while (CMD_SP(cmd) && wait_iter--) {
 638                msleep(ABORT_POLLING_PERIOD);
 639        }
 640        if (CMD_SP(cmd))
 641                ret = QLA_FUNCTION_FAILED;
 642
 643        return ret;
 644}
 645
 646/*
 647 * qla2x00_wait_for_hba_online
  648 *    Wait until the HBA is online after going through
  649 *    <= MAX_RETRIES_OF_ISP_ABORT retries, or until the HBA is
  650 *    finally disabled (i.e. marked offline).
 651 *
 652 * Input:
 653 *     ha - pointer to host adapter structure
 654 *
 655 * Note:
  656 *    May sleep (does context switching); release any spinlocks
  657 *    held before calling this routine.
 658 *
 659 * Return:
 660 *    Success (Adapter is online) : 0
 661 *    Failed  (Adapter is offline/disabled) : 1
 662 */
 663int
 664qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
 665{
 666        int             return_status;
 667        unsigned long   wait_online;
 668        struct qla_hw_data *ha = vha->hw;
 669        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 670
 671        wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
 672        while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
 673            test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
 674            test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
 675            ha->dpc_active) && time_before(jiffies, wait_online)) {
 676
 677                msleep(1000);
 678        }
 679        if (base_vha->flags.online)
 680                return_status = QLA_SUCCESS;
 681        else
 682                return_status = QLA_FUNCTION_FAILED;
 683
 684        return (return_status);
 685}
 686
 687/*
 688 * qla2x00_wait_for_reset_ready
  689 *    Wait until the HBA is online and no flash operations are in
  690 *    progress, after going through <= MAX_RETRIES_OF_ISP_ABORT
  691 *    retries, or until the HBA is finally disabled (marked
  692 *    offline).
 693 *
 694 * Input:
 695 *     ha - pointer to host adapter structure
 696 *
 697 * Note:
  698 *    May sleep (does context switching); release any spinlocks
  699 *    held before calling this routine.
 700 *
 701 * Return:
 702 *    Success (Adapter is online/no flash ops) : 0
 703 *    Failed  (Adapter is offline/disabled/flash ops in progress) : 1
 704 */
 705static int
 706qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
 707{
 708        int             return_status;
 709        unsigned long   wait_online;
 710        struct qla_hw_data *ha = vha->hw;
 711        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 712
 713        wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
 714        while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
 715            test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
 716            test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
 717            ha->optrom_state != QLA_SWAITING ||
 718            ha->dpc_active) && time_before(jiffies, wait_online))
 719                msleep(1000);
 720
 721        if (base_vha->flags.online &&  ha->optrom_state == QLA_SWAITING)
 722                return_status = QLA_SUCCESS;
 723        else
 724                return_status = QLA_FUNCTION_FAILED;
 725
 726        DEBUG2(printk("%s return_status=%d\n", __func__, return_status));
 727
 728        return return_status;
 729}
 730
 731int
 732qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
 733{
 734        int             return_status;
 735        unsigned long   wait_reset;
 736        struct qla_hw_data *ha = vha->hw;
 737        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 738
 739        wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
 740        while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
 741            test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
 742            test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
 743            ha->dpc_active) && time_before(jiffies, wait_reset)) {
 744
 745                msleep(1000);
 746
 747                if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
 748                    ha->flags.chip_reset_done)
 749                        break;
 750        }
 751        if (ha->flags.chip_reset_done)
 752                return_status = QLA_SUCCESS;
 753        else
 754                return_status = QLA_FUNCTION_FAILED;
 755
 756        return return_status;
 757}
 758
 759/*
 760 * qla2x00_wait_for_loop_ready
  761 *    Wait up to MAX_LOOP_TIMEOUT (5 minutes) for the loop
  762 *    to reach the LOOP_READY state.
 763 * Input:
 764 *     ha - pointer to host adapter structure
 765 *
 766 * Note:
  767 *    May sleep (does context switching); release any spinlocks
  768 *    held before calling this routine.
 769 *
 770 *
 771 * Return:
 772 *    Success (LOOP_READY) : 0
 773 *    Failed  (LOOP_NOT_READY) : 1
 774 */
 775static inline int
 776qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
 777{
 778        int      return_status = QLA_SUCCESS;
  779        unsigned long loop_timeout;
 780        struct qla_hw_data *ha = vha->hw;
 781        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 782
  783        /* wait at most 5 minutes for the loop to be ready */
 784        loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
 785
 786        while ((!atomic_read(&base_vha->loop_down_timer) &&
 787            atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
 788            atomic_read(&base_vha->loop_state) != LOOP_READY) {
 789                if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 790                        return_status = QLA_FUNCTION_FAILED;
 791                        break;
 792                }
 793                msleep(1000);
 794                if (time_after_eq(jiffies, loop_timeout)) {
 795                        return_status = QLA_FUNCTION_FAILED;
 796                        break;
 797                }
 798        }
 799        return (return_status);
 800}
 801
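/* Take an extra reference on an srb so it cannot be freed while an
 * error-handling path is still using it. */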
 802static void
 803sp_get(struct srb *sp)
 804{
 805        atomic_inc(&sp->ref_count);
 806}
 807
 808/**************************************************************************
 809* qla2xxx_eh_abort
 810*
 811* Description:
 812*    The abort function will abort the specified command.
 813*
 814* Input:
 815*    cmd = Linux SCSI command packet to be aborted.
 816*
 817* Returns:
 818*    Either SUCCESS or FAILED.
 819*
 820* Note:
 821*    Only return FAILED if command not returned by firmware.
 822**************************************************************************/
 823static int
 824qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 825{
 826        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 827        srb_t *sp;
 828        int ret;
 829        unsigned int id, lun;
 830        unsigned long flags;
 831        int wait = 0;
 832        struct qla_hw_data *ha = vha->hw;
 833
 834        if (!CMD_SP(cmd))
 835                return SUCCESS;
 836
 837        ret = fc_block_scsi_eh(cmd);
 838        if (ret != 0)
 839                return ret;
 840        ret = SUCCESS;
 841
 842        id = cmd->device->id;
 843        lun = cmd->device->lun;
 844
 845        spin_lock_irqsave(&ha->hardware_lock, flags);
 846        sp = (srb_t *) CMD_SP(cmd);
 847        if (!sp) {
 848                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 849                return SUCCESS;
 850        }
 851
 852        DEBUG2(printk("%s(%ld): aborting sp %p from RISC.",
 853            __func__, vha->host_no, sp));
 854
 855        /* Get a reference to the sp and drop the lock.*/
 856        sp_get(sp);
 857
 858        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 859        if (ha->isp_ops->abort_command(sp)) {
 860                DEBUG2(printk("%s(%ld): abort_command "
 861                "mbx failed.\n", __func__, vha->host_no));
 862                ret = FAILED;
 863        } else {
 864                DEBUG3(printk("%s(%ld): abort_command "
 865                "mbx success.\n", __func__, vha->host_no));
 866                wait = 1;
 867        }
 868        qla2x00_sp_compl(ha, sp);
 869
 870        /* Wait for the command to be returned. */
 871        if (wait) {
 872                if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
 873                        qla_printk(KERN_ERR, ha,
 874                            "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n",
 875                            vha->host_no, id, lun, ret);
 876                        ret = FAILED;
 877                }
 878        }
 879
 880        qla_printk(KERN_INFO, ha,
 881            "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n",
 882            vha->host_no, id, lun, wait, ret);
 883
 884        return ret;
 885}
 886
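/*
 * Wait for all outstanding commands that match the requested nexus
 * (host, target or LUN) to be returned by the firmware.  Returns
 * QLA_SUCCESS when every matching command has completed.
 */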
 887int
 888qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
 889        unsigned int l, enum nexus_wait_type type)
 890{
 891        int cnt, match, status;
 892        unsigned long flags;
 893        struct qla_hw_data *ha = vha->hw;
 894        struct req_que *req;
 895        srb_t *sp;
 896
 897        status = QLA_SUCCESS;
 898
 899        spin_lock_irqsave(&ha->hardware_lock, flags);
 900        req = vha->req;
 901        for (cnt = 1; status == QLA_SUCCESS &&
 902                cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
 903                sp = req->outstanding_cmds[cnt];
 904                if (!sp)
 905                        continue;
 906                if ((sp->ctx) && !IS_PROT_IO(sp))
 907                        continue;
 908                if (vha->vp_idx != sp->fcport->vha->vp_idx)
 909                        continue;
 910                match = 0;
 911                switch (type) {
 912                case WAIT_HOST:
 913                        match = 1;
 914                        break;
 915                case WAIT_TARGET:
 916                        match = sp->cmd->device->id == t;
 917                        break;
 918                case WAIT_LUN:
 919                        match = (sp->cmd->device->id == t &&
 920                                sp->cmd->device->lun == l);
 921                        break;
 922                }
 923                if (!match)
 924                        continue;
 925
 926                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 927                status = qla2x00_eh_wait_on_command(sp->cmd);
 928                spin_lock_irqsave(&ha->hardware_lock, flags);
 929        }
 930        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 931
 932        return status;
 933}
 934
 935static char *reset_errors[] = {
 936        "HBA not online",
 937        "HBA not ready",
 938        "Task management failed",
 939        "Waiting for command completions",
 940};
 941
 942static int
 943__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 944    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
 945{
 946        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 947        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 948        int err;
 949
 950        if (!fcport)
 951                return FAILED;
 952
 953        err = fc_block_scsi_eh(cmd);
 954        if (err != 0)
 955                return err;
 956
 957        qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
 958            vha->host_no, cmd->device->id, cmd->device->lun, name);
 959
 960        err = 0;
 961        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
 962                goto eh_reset_failed;
 963        err = 1;
 964        if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
 965                goto eh_reset_failed;
 966        err = 2;
 967        if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
 968                != QLA_SUCCESS)
 969                goto eh_reset_failed;
 970        err = 3;
 971        if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
 972            cmd->device->lun, type) != QLA_SUCCESS)
 973                goto eh_reset_failed;
 974
 975        qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
 976            vha->host_no, cmd->device->id, cmd->device->lun, name);
 977
 978        return SUCCESS;
 979
 980eh_reset_failed:
 981        qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
 982            , vha->host_no, cmd->device->id, cmd->device->lun, name,
 983            reset_errors[err]);
 984        return FAILED;
 985}
 986
 987static int
 988qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
 989{
 990        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 991        struct qla_hw_data *ha = vha->hw;
 992
 993        return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
 994            ha->isp_ops->lun_reset);
 995}
 996
 997static int
 998qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
 999{
1000        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1001        struct qla_hw_data *ha = vha->hw;
1002
1003        return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
1004            ha->isp_ops->target_reset);
1005}
1006
1007/**************************************************************************
1008* qla2xxx_eh_bus_reset
1009*
1010* Description:
1011*    The bus reset function will reset the bus and abort any executing
1012*    commands.
1013*
1014* Input:
 1015*    cmd = Linux SCSI command packet of the command that caused the
1016*          bus reset.
1017*
1018* Returns:
1019*    SUCCESS/FAILURE (defined as macro in scsi.h).
1020*
1021**************************************************************************/
1022static int
1023qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1024{
1025        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1026        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1027        int ret = FAILED;
1028        unsigned int id, lun;
1029
1030        id = cmd->device->id;
1031        lun = cmd->device->lun;
1032
1033        if (!fcport)
1034                return ret;
1035
1036        ret = fc_block_scsi_eh(cmd);
1037        if (ret != 0)
1038                return ret;
1039        ret = FAILED;
1040
1041        qla_printk(KERN_INFO, vha->hw,
1042            "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
1043
1044        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 1045                DEBUG2(printk("%s failed: board disabled\n", __func__));
1046                goto eh_bus_reset_done;
1047        }
1048
1049        if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
1050                if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
1051                        ret = SUCCESS;
1052        }
1053        if (ret == FAILED)
1054                goto eh_bus_reset_done;
1055
1056        /* Flush outstanding commands. */
1057        if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1058            QLA_SUCCESS)
1059                ret = FAILED;
1060
1061eh_bus_reset_done:
1062        qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
1063            (ret == FAILED) ? "failed" : "succeeded");
1064
1065        return ret;
1066}
1067
1068/**************************************************************************
1069* qla2xxx_eh_host_reset
1070*
1071* Description:
1072*    The reset function will reset the Adapter.
1073*
1074* Input:
 1075*      cmd = Linux SCSI command packet of the command that caused the
1076*            adapter reset.
1077*
1078* Returns:
1079*      Either SUCCESS or FAILED.
1080*
1081* Note:
1082**************************************************************************/
1083static int
1084qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1085{
1086        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1087        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1088        struct qla_hw_data *ha = vha->hw;
1089        int ret = FAILED;
1090        unsigned int id, lun;
1091        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1092
1093        id = cmd->device->id;
1094        lun = cmd->device->lun;
1095
1096        if (!fcport)
1097                return ret;
1098
1099        ret = fc_block_scsi_eh(cmd);
1100        if (ret != 0)
1101                return ret;
1102        ret = FAILED;
1103
1104        qla_printk(KERN_INFO, ha,
1105            "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
1106
1107        if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
1108                goto eh_host_reset_lock;
1109
1110        /*
 1111         * FIXME: the dpc thread may be active and processing
 1112         * loop_resync, so wait a while for it to complete before
 1113         * issuing the big hammer. Otherwise the big hammer may cause
 1114         * I/O failures, because it marks the devices as lost and kicks
 1115         * off the port_down_timer while the dpc thread is still stuck
 1116         * waiting for the mailbox command to complete.
1117         */
1118        qla2x00_wait_for_loop_ready(vha);
1119        if (vha != base_vha) {
1120                if (qla2x00_vp_abort_isp(vha))
1121                        goto eh_host_reset_lock;
1122        } else {
1123                if (IS_QLA82XX(vha->hw)) {
1124                        if (!qla82xx_fcoe_ctx_reset(vha)) {
1125                                /* Ctx reset success */
1126                                ret = SUCCESS;
1127                                goto eh_host_reset_lock;
1128                        }
1129                        /* fall thru if ctx reset failed */
1130                }
1131                if (ha->wq)
1132                        flush_workqueue(ha->wq);
1133
1134                set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1135                if (ha->isp_ops->abort_isp(base_vha)) {
1136                        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1137                        /* failed. schedule dpc to try */
1138                        set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1139
1140                        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1141                                goto eh_host_reset_lock;
1142                }
1143                clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1144        }
1145
 1146        /* Wait for commands to be returned to the OS. */
1147        if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
1148                QLA_SUCCESS)
1149                ret = SUCCESS;
1150
1151eh_host_reset_lock:
1152        qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1153            (ret == FAILED) ? "failed" : "succeeded");
1154
1155        return ret;
1156}
1157
1158/*
1159* qla2x00_loop_reset
1160*      Issue loop reset.
1161*
1162* Input:
1163*      ha = adapter block pointer.
1164*
1165* Returns:
1166*      0 = success
1167*/
1168int
1169qla2x00_loop_reset(scsi_qla_host_t *vha)
1170{
1171        int ret;
1172        struct fc_port *fcport;
1173        struct qla_hw_data *ha = vha->hw;
1174
1175        if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
1176                list_for_each_entry(fcport, &vha->vp_fcports, list) {
1177                        if (fcport->port_type != FCT_TARGET)
1178                                continue;
1179
1180                        ret = ha->isp_ops->target_reset(fcport, 0, 0);
1181                        if (ret != QLA_SUCCESS) {
1182                                DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1183                                    "target_reset=%d d_id=%x.\n", __func__,
1184                                    vha->host_no, ret, fcport->d_id.b24));
1185                        }
1186                }
1187        }
1188
1189        if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
1190                ret = qla2x00_full_login_lip(vha);
1191                if (ret != QLA_SUCCESS) {
1192                        DEBUG2_3(printk("%s(%ld): failed: "
1193                            "full_login_lip=%d.\n", __func__, vha->host_no,
1194                            ret));
1195                }
1196                atomic_set(&vha->loop_state, LOOP_DOWN);
1197                atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1198                qla2x00_mark_all_devices_lost(vha, 0);
1199                qla2x00_wait_for_loop_ready(vha);
1200        }
1201
1202        if (ha->flags.enable_lip_reset) {
1203                ret = qla2x00_lip_reset(vha);
1204                if (ret != QLA_SUCCESS) {
1205                        DEBUG2_3(printk("%s(%ld): failed: "
1206                            "lip_reset=%d.\n", __func__, vha->host_no, ret));
1207                } else
1208                        qla2x00_wait_for_loop_ready(vha);
1209        }
1210
1211        /* Issue marker command only when we are going to start the I/O */
1212        vha->marker_needed = 1;
1213
1214        return QLA_SUCCESS;
1215}
1216
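/*
 * Complete every outstanding command on all request queues with the
 * supplied result code.  Regular SCSI commands are returned to the
 * midlayer; login/logout IOCBs and BSG jobs are released through
 * their own cleanup paths.
 */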
1217void
1218qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1219{
1220        int que, cnt;
1221        unsigned long flags;
1222        srb_t *sp;
1223        struct srb_ctx *ctx;
1224        struct qla_hw_data *ha = vha->hw;
1225        struct req_que *req;
1226
1227        spin_lock_irqsave(&ha->hardware_lock, flags);
1228        for (que = 0; que < ha->max_req_queues; que++) {
1229                req = ha->req_q_map[que];
1230                if (!req)
1231                        continue;
1232                for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1233                        sp = req->outstanding_cmds[cnt];
1234                        if (sp) {
1235                                req->outstanding_cmds[cnt] = NULL;
1236                                if (!sp->ctx ||
1237                                        (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
1238                                        IS_PROT_IO(sp)) {
1239                                        sp->cmd->result = res;
1240                                        qla2x00_sp_compl(ha, sp);
1241                                } else {
1242                                        ctx = sp->ctx;
1243                                        if (ctx->type == SRB_LOGIN_CMD ||
1244                                            ctx->type == SRB_LOGOUT_CMD) {
1245                                                ctx->u.iocb_cmd->free(sp);
1246                                        } else {
1247                                                struct fc_bsg_job *bsg_job =
1248                                                    ctx->u.bsg_job;
1249                                                if (bsg_job->request->msgcode
1250                                                    == FC_BSG_HST_CT)
1251                                                        kfree(sp->fcport);
1252                                                bsg_job->req->errors = 0;
1253                                                bsg_job->reply->result = res;
1254                                                bsg_job->job_done(bsg_job);
1255                                                kfree(sp->ctx);
1256                                                mempool_free(sp,
1257                                                        ha->srb_mempool);
1258                                        }
1259                                }
1260                        }
1261                }
1262        }
1263        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1264}
1265
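/*
 * SCSI midlayer slave hooks: bind the scsi_device to its fc_port at
 * allocation time and set up tagged command queuing at the request
 * queue's maximum depth.
 */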
1266static int
1267qla2xxx_slave_alloc(struct scsi_device *sdev)
1268{
1269        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1270
1271        if (!rport || fc_remote_port_chkready(rport))
1272                return -ENXIO;
1273
1274        sdev->hostdata = *(fc_port_t **)rport->dd_data;
1275
1276        return 0;
1277}
1278
1279static int
1280qla2xxx_slave_configure(struct scsi_device *sdev)
1281{
1282        scsi_qla_host_t *vha = shost_priv(sdev->host);
1283        struct req_que *req = vha->req;
1284
1285        if (sdev->tagged_supported)
1286                scsi_activate_tcq(sdev, req->max_q_depth);
1287        else
1288                scsi_deactivate_tcq(sdev, req->max_q_depth);
1289        return 0;
1290}
1291
1292static void
1293qla2xxx_slave_destroy(struct scsi_device *sdev)
1294{
1295        sdev->hostdata = NULL;
1296}
1297
1298static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1299{
1300        fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1301
1302        if (!scsi_track_queue_full(sdev, qdepth))
1303                return;
1304
1305        DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
1306                "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
1307                fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1308                sdev->queue_depth));
1309}
1310
1311static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1312{
1313        fc_port_t *fcport = sdev->hostdata;
1314        struct scsi_qla_host *vha = fcport->vha;
1315        struct qla_hw_data *ha = vha->hw;
1316        struct req_que *req = NULL;
1317
1318        req = vha->req;
1319        if (!req)
1320                return;
1321
1322        if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1323                return;
1324
1325        if (sdev->ordered_tags)
1326                scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
1327        else
1328                scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1329
1330        DEBUG2(qla_printk(KERN_INFO, ha,
1331               "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
1332               fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1333               sdev->queue_depth));
1334}
1335
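/*
 * scsi_host_template change_queue_depth hook: adjust the sdev queue
 * depth for the reason supplied by the midlayer (explicit change,
 * queue-full tracking, or ramp-up).
 */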
1336static int
1337qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1338{
1339        switch (reason) {
1340        case SCSI_QDEPTH_DEFAULT:
1341                scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1342                break;
1343        case SCSI_QDEPTH_QFULL:
1344                qla2x00_handle_queue_full(sdev, qdepth);
1345                break;
1346        case SCSI_QDEPTH_RAMP_UP:
1347                qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1348                break;
1349        default:
1350                return -EOPNOTSUPP;
1351        }
1352
1353        return sdev->queue_depth;
1354}
1355
1356static int
1357qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1358{
1359        if (sdev->tagged_supported) {
1360                scsi_set_tag_type(sdev, tag_type);
1361                if (tag_type)
1362                        scsi_activate_tcq(sdev, sdev->queue_depth);
1363                else
1364                        scsi_deactivate_tcq(sdev, sdev->queue_depth);
1365        } else
1366                tag_type = 0;
1367
1368        return tag_type;
1369}
1370
1371/**
1372 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1373 * @ha: HA context
1374 *
 1375 * At exit, the @ha's flags.enable_64bit_addressing is set to indicate
 1376 * the supported addressing method.
1377 */
1378static void
1379qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1380{
1381        /* Assume a 32bit DMA mask. */
1382        ha->flags.enable_64bit_addressing = 0;
1383
1384        if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1385                /* Any upper-dword bits set? */
1386                if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1387                    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
1388                        /* Ok, a 64bit DMA mask is applicable. */
1389                        ha->flags.enable_64bit_addressing = 1;
1390                        ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1391                        ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1392                        return;
1393                }
1394        }
1395
1396        dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1397        pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
1398}
1399
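/*
 * ISP2100/ISP23xx interrupt control helpers: enable or disable RISC
 * and host interrupts through the ictrl register while holding the
 * hardware lock.
 */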
1400static void
1401qla2x00_enable_intrs(struct qla_hw_data *ha)
1402{
1403        unsigned long flags = 0;
1404        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1405
1406        spin_lock_irqsave(&ha->hardware_lock, flags);
1407        ha->interrupts_on = 1;
1408        /* enable risc and host interrupts */
1409        WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1410        RD_REG_WORD(&reg->ictrl);
1411        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1412
1413}
1414
1415static void
1416qla2x00_disable_intrs(struct qla_hw_data *ha)
1417{
1418        unsigned long flags = 0;
1419        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1420
1421        spin_lock_irqsave(&ha->hardware_lock, flags);
1422        ha->interrupts_on = 0;
1423        /* disable risc and host interrupts */
1424        WRT_REG_WORD(&reg->ictrl, 0);
1425        RD_REG_WORD(&reg->ictrl);
1426        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1427}
1428
1429static void
1430qla24xx_enable_intrs(struct qla_hw_data *ha)
1431{
1432        unsigned long flags = 0;
1433        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1434
1435        spin_lock_irqsave(&ha->hardware_lock, flags);
1436        ha->interrupts_on = 1;
1437        WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1438        RD_REG_DWORD(&reg->ictrl);
1439        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1440}
1441
1442static void
1443qla24xx_disable_intrs(struct qla_hw_data *ha)
1444{
1445        unsigned long flags = 0;
1446        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1447
1448        if (IS_NOPOLLING_TYPE(ha))
1449                return;
1450        spin_lock_irqsave(&ha->hardware_lock, flags);
1451        ha->interrupts_on = 0;
1452        WRT_REG_DWORD(&reg->ictrl, 0);
1453        RD_REG_DWORD(&reg->ictrl);
1454        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1455}
1456
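/*
 * Per-ISP-family operation tables: each table maps the generic
 * isp_ops callbacks used throughout the driver onto the chip-specific
 * implementations.
 */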
1457static struct isp_operations qla2100_isp_ops = {
1458        .pci_config             = qla2100_pci_config,
1459        .reset_chip             = qla2x00_reset_chip,
1460        .chip_diag              = qla2x00_chip_diag,
1461        .config_rings           = qla2x00_config_rings,
1462        .reset_adapter          = qla2x00_reset_adapter,
1463        .nvram_config           = qla2x00_nvram_config,
1464        .update_fw_options      = qla2x00_update_fw_options,
1465        .load_risc              = qla2x00_load_risc,
1466        .pci_info_str           = qla2x00_pci_info_str,
1467        .fw_version_str         = qla2x00_fw_version_str,
1468        .intr_handler           = qla2100_intr_handler,
1469        .enable_intrs           = qla2x00_enable_intrs,
1470        .disable_intrs          = qla2x00_disable_intrs,
1471        .abort_command          = qla2x00_abort_command,
1472        .target_reset           = qla2x00_abort_target,
1473        .lun_reset              = qla2x00_lun_reset,
1474        .fabric_login           = qla2x00_login_fabric,
1475        .fabric_logout          = qla2x00_fabric_logout,
1476        .calc_req_entries       = qla2x00_calc_iocbs_32,
1477        .build_iocbs            = qla2x00_build_scsi_iocbs_32,
1478        .prep_ms_iocb           = qla2x00_prep_ms_iocb,
1479        .prep_ms_fdmi_iocb      = qla2x00_prep_ms_fdmi_iocb,
1480        .read_nvram             = qla2x00_read_nvram_data,
1481        .write_nvram            = qla2x00_write_nvram_data,
1482        .fw_dump                = qla2100_fw_dump,
1483        .beacon_on              = NULL,
1484        .beacon_off             = NULL,
1485        .beacon_blink           = NULL,
1486        .read_optrom            = qla2x00_read_optrom_data,
1487        .write_optrom           = qla2x00_write_optrom_data,
1488        .get_flash_version      = qla2x00_get_flash_version,
1489        .start_scsi             = qla2x00_start_scsi,
1490        .abort_isp              = qla2x00_abort_isp,
1491};
1492
1493static struct isp_operations qla2300_isp_ops = {
1494        .pci_config             = qla2300_pci_config,
1495        .reset_chip             = qla2x00_reset_chip,
1496        .chip_diag              = qla2x00_chip_diag,
1497        .config_rings           = qla2x00_config_rings,
1498        .reset_adapter          = qla2x00_reset_adapter,
1499        .nvram_config           = qla2x00_nvram_config,
1500        .update_fw_options      = qla2x00_update_fw_options,
1501        .load_risc              = qla2x00_load_risc,
1502        .pci_info_str           = qla2x00_pci_info_str,
1503        .fw_version_str         = qla2x00_fw_version_str,
1504        .intr_handler           = qla2300_intr_handler,
1505        .enable_intrs           = qla2x00_enable_intrs,
1506        .disable_intrs          = qla2x00_disable_intrs,
1507        .abort_command          = qla2x00_abort_command,
1508        .target_reset           = qla2x00_abort_target,
1509        .lun_reset              = qla2x00_lun_reset,
1510        .fabric_login           = qla2x00_login_fabric,
1511        .fabric_logout          = qla2x00_fabric_logout,
1512        .calc_req_entries       = qla2x00_calc_iocbs_32,
1513        .build_iocbs            = qla2x00_build_scsi_iocbs_32,
1514        .prep_ms_iocb           = qla2x00_prep_ms_iocb,
1515        .prep_ms_fdmi_iocb      = qla2x00_prep_ms_fdmi_iocb,
1516        .read_nvram             = qla2x00_read_nvram_data,
1517        .write_nvram            = qla2x00_write_nvram_data,
1518        .fw_dump                = qla2300_fw_dump,
1519        .beacon_on              = qla2x00_beacon_on,
1520        .beacon_off             = qla2x00_beacon_off,
1521        .beacon_blink           = qla2x00_beacon_blink,
1522        .read_optrom            = qla2x00_read_optrom_data,
1523        .write_optrom           = qla2x00_write_optrom_data,
1524        .get_flash_version      = qla2x00_get_flash_version,
1525        .start_scsi             = qla2x00_start_scsi,
1526        .abort_isp              = qla2x00_abort_isp,
1527};
1528
1529static struct isp_operations qla24xx_isp_ops = {
1530        .pci_config             = qla24xx_pci_config,
1531        .reset_chip             = qla24xx_reset_chip,
1532        .chip_diag              = qla24xx_chip_diag,
1533        .config_rings           = qla24xx_config_rings,
1534        .reset_adapter          = qla24xx_reset_adapter,
1535        .nvram_config           = qla24xx_nvram_config,
1536        .update_fw_options      = qla24xx_update_fw_options,
1537        .load_risc              = qla24xx_load_risc,
1538        .pci_info_str           = qla24xx_pci_info_str,
1539        .fw_version_str         = qla24xx_fw_version_str,
1540        .intr_handler           = qla24xx_intr_handler,
1541        .enable_intrs           = qla24xx_enable_intrs,
1542        .disable_intrs          = qla24xx_disable_intrs,
1543        .abort_command          = qla24xx_abort_command,
1544        .target_reset           = qla24xx_abort_target,
1545        .lun_reset              = qla24xx_lun_reset,
1546        .fabric_login           = qla24xx_login_fabric,
1547        .fabric_logout          = qla24xx_fabric_logout,
1548        .calc_req_entries       = NULL,
1549        .build_iocbs            = NULL,
1550        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1551        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1552        .read_nvram             = qla24xx_read_nvram_data,
1553        .write_nvram            = qla24xx_write_nvram_data,
1554        .fw_dump                = qla24xx_fw_dump,
1555        .beacon_on              = qla24xx_beacon_on,
1556        .beacon_off             = qla24xx_beacon_off,
1557        .beacon_blink           = qla24xx_beacon_blink,
1558        .read_optrom            = qla24xx_read_optrom_data,
1559        .write_optrom           = qla24xx_write_optrom_data,
1560        .get_flash_version      = qla24xx_get_flash_version,
1561        .start_scsi             = qla24xx_start_scsi,
1562        .abort_isp              = qla2x00_abort_isp,
1563};
1564
1565static struct isp_operations qla25xx_isp_ops = {
1566        .pci_config             = qla25xx_pci_config,
1567        .reset_chip             = qla24xx_reset_chip,
1568        .chip_diag              = qla24xx_chip_diag,
1569        .config_rings           = qla24xx_config_rings,
1570        .reset_adapter          = qla24xx_reset_adapter,
1571        .nvram_config           = qla24xx_nvram_config,
1572        .update_fw_options      = qla24xx_update_fw_options,
1573        .load_risc              = qla24xx_load_risc,
1574        .pci_info_str           = qla24xx_pci_info_str,
1575        .fw_version_str         = qla24xx_fw_version_str,
1576        .intr_handler           = qla24xx_intr_handler,
1577        .enable_intrs           = qla24xx_enable_intrs,
1578        .disable_intrs          = qla24xx_disable_intrs,
1579        .abort_command          = qla24xx_abort_command,
1580        .target_reset           = qla24xx_abort_target,
1581        .lun_reset              = qla24xx_lun_reset,
1582        .fabric_login           = qla24xx_login_fabric,
1583        .fabric_logout          = qla24xx_fabric_logout,
1584        .calc_req_entries       = NULL,
1585        .build_iocbs            = NULL,
1586        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1587        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1588        .read_nvram             = qla25xx_read_nvram_data,
1589        .write_nvram            = qla25xx_write_nvram_data,
1590        .fw_dump                = qla25xx_fw_dump,
1591        .beacon_on              = qla24xx_beacon_on,
1592        .beacon_off             = qla24xx_beacon_off,
1593        .beacon_blink           = qla24xx_beacon_blink,
1594        .read_optrom            = qla25xx_read_optrom_data,
1595        .write_optrom           = qla24xx_write_optrom_data,
1596        .get_flash_version      = qla24xx_get_flash_version,
1597        .start_scsi             = qla24xx_dif_start_scsi,
1598        .abort_isp              = qla2x00_abort_isp,
1599};
1600
1601static struct isp_operations qla81xx_isp_ops = {
1602        .pci_config             = qla25xx_pci_config,
1603        .reset_chip             = qla24xx_reset_chip,
1604        .chip_diag              = qla24xx_chip_diag,
1605        .config_rings           = qla24xx_config_rings,
1606        .reset_adapter          = qla24xx_reset_adapter,
1607        .nvram_config           = qla81xx_nvram_config,
1608        .update_fw_options      = qla81xx_update_fw_options,
1609        .load_risc              = qla81xx_load_risc,
1610        .pci_info_str           = qla24xx_pci_info_str,
1611        .fw_version_str         = qla24xx_fw_version_str,
1612        .intr_handler           = qla24xx_intr_handler,
1613        .enable_intrs           = qla24xx_enable_intrs,
1614        .disable_intrs          = qla24xx_disable_intrs,
1615        .abort_command          = qla24xx_abort_command,
1616        .target_reset           = qla24xx_abort_target,
1617        .lun_reset              = qla24xx_lun_reset,
1618        .fabric_login           = qla24xx_login_fabric,
1619        .fabric_logout          = qla24xx_fabric_logout,
1620        .calc_req_entries       = NULL,
1621        .build_iocbs            = NULL,
1622        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1623        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1624        .read_nvram             = NULL,
1625        .write_nvram            = NULL,
1626        .fw_dump                = qla81xx_fw_dump,
1627        .beacon_on              = qla24xx_beacon_on,
1628        .beacon_off             = qla24xx_beacon_off,
1629        .beacon_blink           = qla24xx_beacon_blink,
1630        .read_optrom            = qla25xx_read_optrom_data,
1631        .write_optrom           = qla24xx_write_optrom_data,
1632        .get_flash_version      = qla24xx_get_flash_version,
1633        .start_scsi             = qla24xx_dif_start_scsi,
1634        .abort_isp              = qla2x00_abort_isp,
1635};
1636
1637static struct isp_operations qla82xx_isp_ops = {
1638        .pci_config             = qla82xx_pci_config,
1639        .reset_chip             = qla82xx_reset_chip,
1640        .chip_diag              = qla24xx_chip_diag,
1641        .config_rings           = qla82xx_config_rings,
1642        .reset_adapter          = qla24xx_reset_adapter,
1643        .nvram_config           = qla81xx_nvram_config,
1644        .update_fw_options      = qla24xx_update_fw_options,
1645        .load_risc              = qla82xx_load_risc,
1646        .pci_info_str           = qla82xx_pci_info_str,
1647        .fw_version_str         = qla24xx_fw_version_str,
1648        .intr_handler           = qla82xx_intr_handler,
1649        .enable_intrs           = qla82xx_enable_intrs,
1650        .disable_intrs          = qla82xx_disable_intrs,
1651        .abort_command          = qla24xx_abort_command,
1652        .target_reset           = qla24xx_abort_target,
1653        .lun_reset              = qla24xx_lun_reset,
1654        .fabric_login           = qla24xx_login_fabric,
1655        .fabric_logout          = qla24xx_fabric_logout,
1656        .calc_req_entries       = NULL,
1657        .build_iocbs            = NULL,
1658        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1659        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1660        .read_nvram             = qla24xx_read_nvram_data,
1661        .write_nvram            = qla24xx_write_nvram_data,
1662        .fw_dump                = qla24xx_fw_dump,
1663        .beacon_on              = qla24xx_beacon_on,
1664        .beacon_off             = qla24xx_beacon_off,
1665        .beacon_blink           = qla24xx_beacon_blink,
1666        .read_optrom            = qla82xx_read_optrom_data,
1667        .write_optrom           = qla82xx_write_optrom_data,
1668        .get_flash_version      = qla24xx_get_flash_version,
1669        .start_scsi             = qla82xx_start_scsi,
1670        .abort_isp              = qla82xx_abort_isp,
1671};
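/*
 * Chip-specific behaviour is isolated behind these isp_operations tables;
 * common code simply dispatches through ha->isp_ops, for example
 * "ha->isp_ops->enable_intrs(ha);" in the probe path below, so the same
 * flow works on every supported ISP generation.
 */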
1672
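/*
 * qla2x00_set_isp_flags() - classify the adapter from its PCI device ID.
 *
 * Sets the DT_* device-type bits (FWI-2 interface, ZIO, iIDMA, OEM
 * variants) and records the RISC firmware load address.  The physical
 * port number is derived from ha->portnum on ISP82xx parts, otherwise it
 * is read from the PCI interrupt-pin register.
 */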
1673static inline void
1674qla2x00_set_isp_flags(struct qla_hw_data *ha)
1675{
1676        ha->device_type = DT_EXTENDED_IDS;
1677        switch (ha->pdev->device) {
1678        case PCI_DEVICE_ID_QLOGIC_ISP2100:
1679                ha->device_type |= DT_ISP2100;
1680                ha->device_type &= ~DT_EXTENDED_IDS;
1681                ha->fw_srisc_address = RISC_START_ADDRESS_2100;
1682                break;
1683        case PCI_DEVICE_ID_QLOGIC_ISP2200:
1684                ha->device_type |= DT_ISP2200;
1685                ha->device_type &= ~DT_EXTENDED_IDS;
1686                ha->fw_srisc_address = RISC_START_ADDRESS_2100;
1687                break;
1688        case PCI_DEVICE_ID_QLOGIC_ISP2300:
1689                ha->device_type |= DT_ISP2300;
1690                ha->device_type |= DT_ZIO_SUPPORTED;
1691                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1692                break;
1693        case PCI_DEVICE_ID_QLOGIC_ISP2312:
1694                ha->device_type |= DT_ISP2312;
1695                ha->device_type |= DT_ZIO_SUPPORTED;
1696                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1697                break;
1698        case PCI_DEVICE_ID_QLOGIC_ISP2322:
1699                ha->device_type |= DT_ISP2322;
1700                ha->device_type |= DT_ZIO_SUPPORTED;
1701                if (ha->pdev->subsystem_vendor == 0x1028 &&
1702                    ha->pdev->subsystem_device == 0x0170)
1703                        ha->device_type |= DT_OEM_001;
1704                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1705                break;
1706        case PCI_DEVICE_ID_QLOGIC_ISP6312:
1707                ha->device_type |= DT_ISP6312;
1708                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1709                break;
1710        case PCI_DEVICE_ID_QLOGIC_ISP6322:
1711                ha->device_type |= DT_ISP6322;
1712                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1713                break;
1714        case PCI_DEVICE_ID_QLOGIC_ISP2422:
1715                ha->device_type |= DT_ISP2422;
1716                ha->device_type |= DT_ZIO_SUPPORTED;
1717                ha->device_type |= DT_FWI2;
1718                ha->device_type |= DT_IIDMA;
1719                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1720                break;
1721        case PCI_DEVICE_ID_QLOGIC_ISP2432:
1722                ha->device_type |= DT_ISP2432;
1723                ha->device_type |= DT_ZIO_SUPPORTED;
1724                ha->device_type |= DT_FWI2;
1725                ha->device_type |= DT_IIDMA;
1726                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1727                break;
1728        case PCI_DEVICE_ID_QLOGIC_ISP8432:
1729                ha->device_type |= DT_ISP8432;
1730                ha->device_type |= DT_ZIO_SUPPORTED;
1731                ha->device_type |= DT_FWI2;
1732                ha->device_type |= DT_IIDMA;
1733                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1734                break;
1735        case PCI_DEVICE_ID_QLOGIC_ISP5422:
1736                ha->device_type |= DT_ISP5422;
1737                ha->device_type |= DT_FWI2;
1738                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1739                break;
1740        case PCI_DEVICE_ID_QLOGIC_ISP5432:
1741                ha->device_type |= DT_ISP5432;
1742                ha->device_type |= DT_FWI2;
1743                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1744                break;
1745        case PCI_DEVICE_ID_QLOGIC_ISP2532:
1746                ha->device_type |= DT_ISP2532;
1747                ha->device_type |= DT_ZIO_SUPPORTED;
1748                ha->device_type |= DT_FWI2;
1749                ha->device_type |= DT_IIDMA;
1750                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1751                break;
1752        case PCI_DEVICE_ID_QLOGIC_ISP8001:
1753                ha->device_type |= DT_ISP8001;
1754                ha->device_type |= DT_ZIO_SUPPORTED;
1755                ha->device_type |= DT_FWI2;
1756                ha->device_type |= DT_IIDMA;
1757                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1758                break;
1759        case PCI_DEVICE_ID_QLOGIC_ISP8021:
1760                ha->device_type |= DT_ISP8021;
1761                ha->device_type |= DT_ZIO_SUPPORTED;
1762                ha->device_type |= DT_FWI2;
1763                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1764                /* Initialize 82XX ISP flags */
1765                qla82xx_init_flags(ha);
1766                break;
1767        }
1768
1769        if (IS_QLA82XX(ha))
1770                ha->port_no = !(ha->portnum & 1);
1771        else
1772                /* Get adapter physical port no from interrupt pin register. */
1773                pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1774
1775        if (ha->port_no & 1)
1776                ha->flags.port0 = 1;
1777        else
1778                ha->flags.port0 = 0;
1779}
1780
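/*
 * qla2x00_iospace_config() - reserve and map the PCI register windows.
 *
 * Reserves the selected BARs, records an optional PIO base (only needed
 * for flash access on ISP2312 v2), maps the MMIO registers from BAR 1,
 * and on multiqueue-capable ISP25xx/ISP81xx parts maps the MQ register
 * block from BAR 3 and sizes the MSI-X vector and queue counts.
 */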
1781static int
1782qla2x00_iospace_config(struct qla_hw_data *ha)
1783{
1784        resource_size_t pio;
1785        uint16_t msix;
1786        int cpus;
1787
1788        if (IS_QLA82XX(ha))
1789                return qla82xx_iospace_config(ha);
1790
1791        if (pci_request_selected_regions(ha->pdev, ha->bars,
1792            QLA2XXX_DRIVER_NAME)) {
1793                qla_printk(KERN_WARNING, ha,
1794                    "Failed to reserve PIO/MMIO regions (%s)\n",
1795                    pci_name(ha->pdev));
1796
1797                goto iospace_error_exit;
1798        }
1799        if (!(ha->bars & 1))
1800                goto skip_pio;
1801
1802        /* We only need PIO for Flash operations on ISP2312 v2 chips. */
1803        pio = pci_resource_start(ha->pdev, 0);
1804        if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1805                if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1806                        qla_printk(KERN_WARNING, ha,
1807                            "Invalid PCI I/O region size (%s)...\n",
1808                                pci_name(ha->pdev));
1809                        pio = 0;
1810                }
1811        } else {
1812                qla_printk(KERN_WARNING, ha,
1813                    "region #0 not a PIO resource (%s)...\n",
1814                    pci_name(ha->pdev));
1815                pio = 0;
1816        }
1817        ha->pio_address = pio;
1818
1819skip_pio:
1820        /* Use MMIO operations for all accesses. */
1821        if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1822                qla_printk(KERN_ERR, ha,
1823                    "region #1 not an MMIO resource (%s), aborting\n",
1824                    pci_name(ha->pdev));
1825                goto iospace_error_exit;
1826        }
1827        if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1828                qla_printk(KERN_ERR, ha,
1829                    "Invalid PCI mem region size (%s), aborting\n",
1830                        pci_name(ha->pdev));
1831                goto iospace_error_exit;
1832        }
1833
1834        ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1835        if (!ha->iobase) {
1836                qla_printk(KERN_ERR, ha,
1837                    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
1838
1839                goto iospace_error_exit;
1840        }
1841
1842        /* Determine queue resources */
1843        ha->max_req_queues = ha->max_rsp_queues = 1;
1844        if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
1845                (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
1846                (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1847                goto mqiobase_exit;
1848
1849        ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1850                        pci_resource_len(ha->pdev, 3));
1851        if (ha->mqiobase) {
1852                /* Read the MSI-X vector count supported by the board. */
1853                pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1854                ha->msix_count = msix;
1855                /* Max queues are bounded by available msix vectors */
1856                /* queue 0 uses two msix vectors */
1857                if (ql2xmultique_tag) {
1858                        cpus = num_online_cpus();
1859                        ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
1860                                (cpus + 1) : (ha->msix_count - 1);
1861                        ha->max_req_queues = 2;
1862                } else if (ql2xmaxqueues > 1) {
1863                        ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1864                                                QLA_MQ_SIZE : ql2xmaxqueues;
1865                        DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1866                        " of request queues:%d\n", ha->max_req_queues));
1867                }
1868                qla_printk(KERN_INFO, ha,
1869                        "MSI-X vector count: %d\n", msix);
1870        } else
1871                qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1872
1873mqiobase_exit:
1874        ha->msix_count = ha->max_rsp_queues + 1;
1875        return (0);
1876
1877iospace_error_exit:
1878        return (-ENOMEM);
1879}
1880
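/*
 * Asynchronous SCSI scan hooks: scan_start schedules a loop resync via
 * the DPC flags, and scan_finished reports completion once the loop is
 * READY or the loop_reset_delay has expired.
 */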
1881static void
1882qla2xxx_scan_start(struct Scsi_Host *shost)
1883{
1884        scsi_qla_host_t *vha = shost_priv(shost);
1885
1886        if (vha->hw->flags.running_gold_fw)
1887                return;
1888
1889        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1890        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1891        set_bit(RSCN_UPDATE, &vha->dpc_flags);
1892        set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1893}
1894
1895static int
1896qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1897{
1898        scsi_qla_host_t *vha = shost_priv(shost);
1899
1900        if (!vha->host)
1901                return 1;
1902        if (time > vha->hw->loop_reset_delay * HZ)
1903                return 1;
1904
1905        return atomic_read(&vha->loop_state) == LOOP_READY;
1906}
1907
1908/*
1909 * PCI driver interface
1910 */
1911static int __devinit
1912qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1913{
1914        int     ret = -ENODEV;
1915        struct Scsi_Host *host;
1916        scsi_qla_host_t *base_vha = NULL;
1917        struct qla_hw_data *ha;
1918        char pci_info[30];
1919        char fw_str[30];
1920        struct scsi_host_template *sht;
1921        int bars, max_id, mem_only = 0;
1922        uint16_t req_length = 0, rsp_length = 0;
1923        struct req_que *req = NULL;
1924        struct rsp_que *rsp = NULL;
1925
1926        bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1927        sht = &qla2xxx_driver_template;
1928        if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
1929            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
1930            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
1931            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
1932            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
1933            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
1934            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
1935            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
1936                bars = pci_select_bars(pdev, IORESOURCE_MEM);
1937                mem_only = 1;
1938        }
1939
1940        if (mem_only) {
1941                if (pci_enable_device_mem(pdev))
1942                        goto probe_out;
1943        } else {
1944                if (pci_enable_device(pdev))
1945                        goto probe_out;
1946        }
1947
1948        /* This may fail but that's ok */
1949        pci_enable_pcie_error_reporting(pdev);
1950
1951        ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1952        if (!ha) {
1953                DEBUG(printk("Unable to allocate memory for ha\n"));
1954                goto probe_out;
1955        }
1956        ha->pdev = pdev;
1957
1958        /* Initialize adapter-wide data. */
1959        ha->bars = bars;
1960        ha->mem_only = mem_only;
1961        spin_lock_init(&ha->hardware_lock);
1962        spin_lock_init(&ha->vport_slock);
1963
1964        /* Set ISP-type information. */
1965        qla2x00_set_isp_flags(ha);
1966
1967        /* Set EEH reset type to fundamental if required by the HBA. */
1968        if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
1969                pdev->needs_freset = 1;
1970        }
1971
1972        /* Configure PCI I/O space */
1973        ret = qla2x00_iospace_config(ha);
1974        if (ret)
1975                goto probe_hw_failed;
1976
1977        qla_printk(KERN_INFO, ha,
1978            "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
1979            ha->iobase);
1980
1981        ha->prev_topology = 0;
1982        ha->init_cb_size = sizeof(init_cb_t);
1983        ha->link_data_rate = PORT_SPEED_UNKNOWN;
1984        ha->optrom_size = OPTROM_SIZE_2300;
1985
1986        /* Assign ISP specific operations. */
1987        max_id = MAX_TARGETS_2200;
1988        if (IS_QLA2100(ha)) {
1989                max_id = MAX_TARGETS_2100;
1990                ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
1991                req_length = REQUEST_ENTRY_CNT_2100;
1992                rsp_length = RESPONSE_ENTRY_CNT_2100;
1993                ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1994                ha->gid_list_info_size = 4;
1995                ha->flash_conf_off = ~0;
1996                ha->flash_data_off = ~0;
1997                ha->nvram_conf_off = ~0;
1998                ha->nvram_data_off = ~0;
1999                ha->isp_ops = &qla2100_isp_ops;
2000        } else if (IS_QLA2200(ha)) {
2001                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2002                req_length = REQUEST_ENTRY_CNT_2200;
2003                rsp_length = RESPONSE_ENTRY_CNT_2100;
2004                ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
2005                ha->gid_list_info_size = 4;
2006                ha->flash_conf_off = ~0;
2007                ha->flash_data_off = ~0;
2008                ha->nvram_conf_off = ~0;
2009                ha->nvram_data_off = ~0;
2010                ha->isp_ops = &qla2100_isp_ops;
2011        } else if (IS_QLA23XX(ha)) {
2012                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2013                req_length = REQUEST_ENTRY_CNT_2200;
2014                rsp_length = RESPONSE_ENTRY_CNT_2300;
2015                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2016                ha->gid_list_info_size = 6;
2017                if (IS_QLA2322(ha) || IS_QLA6322(ha))
2018                        ha->optrom_size = OPTROM_SIZE_2322;
2019                ha->flash_conf_off = ~0;
2020                ha->flash_data_off = ~0;
2021                ha->nvram_conf_off = ~0;
2022                ha->nvram_data_off = ~0;
2023                ha->isp_ops = &qla2300_isp_ops;
2024        } else if (IS_QLA24XX_TYPE(ha)) {
2025                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2026                req_length = REQUEST_ENTRY_CNT_24XX;
2027                rsp_length = RESPONSE_ENTRY_CNT_2300;
2028                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2029                ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2030                ha->gid_list_info_size = 8;
2031                ha->optrom_size = OPTROM_SIZE_24XX;
2032                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
2033                ha->isp_ops = &qla24xx_isp_ops;
2034                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2035                ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2036                ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2037                ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2038        } else if (IS_QLA25XX(ha)) {
2039                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2040                req_length = REQUEST_ENTRY_CNT_24XX;
2041                rsp_length = RESPONSE_ENTRY_CNT_2300;
2042                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2043                ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2044                ha->gid_list_info_size = 8;
2045                ha->optrom_size = OPTROM_SIZE_25XX;
2046                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2047                ha->isp_ops = &qla25xx_isp_ops;
2048                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2049                ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2050                ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2051                ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2052        } else if (IS_QLA81XX(ha)) {
2053                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2054                req_length = REQUEST_ENTRY_CNT_24XX;
2055                rsp_length = RESPONSE_ENTRY_CNT_2300;
2056                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2057                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2058                ha->gid_list_info_size = 8;
2059                ha->optrom_size = OPTROM_SIZE_81XX;
2060                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2061                ha->isp_ops = &qla81xx_isp_ops;
2062                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2063                ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2064                ha->nvram_conf_off = ~0;
2065                ha->nvram_data_off = ~0;
2066        } else if (IS_QLA82XX(ha)) {
2067                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2068                req_length = REQUEST_ENTRY_CNT_82XX;
2069                rsp_length = RESPONSE_ENTRY_CNT_82XX;
2070                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2071                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2072                ha->gid_list_info_size = 8;
2073                ha->optrom_size = OPTROM_SIZE_82XX;
2074                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2075                ha->isp_ops = &qla82xx_isp_ops;
2076                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2077                ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2078                ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2079                ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2080        }
2081
2082        mutex_init(&ha->vport_lock);
2083        init_completion(&ha->mbx_cmd_comp);
2084        complete(&ha->mbx_cmd_comp);
2085        init_completion(&ha->mbx_intr_comp);
2086        init_completion(&ha->dcbx_comp);
2087
2088        set_bit(0, (unsigned long *) ha->vp_idx_map);
2089
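        /*
         * Configure the DMA mask, then allocate the per-adapter memory
         * (init cb, GID list, pools, NVRAM cache, request/response rings).
         * qla2x00_mem_alloc() returns 1 on success and -ENOMEM on failure.
         */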
2090        qla2x00_config_dma_addressing(ha);
2091        ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2092        if (ret != 1) {
2093                qla_printk(KERN_WARNING, ha,
2094                    "[ERROR] Failed to allocate memory for adapter\n");
2095
2096                goto probe_hw_failed;
2097        }
2098
2099        req->max_q_depth = MAX_Q_DEPTH;
2100        if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
2101                req->max_q_depth = ql2xmaxqdepth;
2102
2103
2104        base_vha = qla2x00_create_host(sht, ha);
2105        if (!base_vha) {
2106                qla_printk(KERN_WARNING, ha,
2107                    "[ERROR] Failed to allocate memory for scsi_host\n");
2108
2109                ret = -ENOMEM;
2110                qla2x00_mem_free(ha);
2111                qla2x00_free_req_que(ha, req);
2112                qla2x00_free_rsp_que(ha, rsp);
2113                goto probe_hw_failed;
2114        }
2115
2116        pci_set_drvdata(pdev, base_vha);
2117
2118        host = base_vha->host;
2119        base_vha->req = req;
2120        host->can_queue = req->length + 128;
2121        if (IS_QLA2XXX_MIDTYPE(ha))
2122                base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
2123        else
2124                base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
2125                                                base_vha->vp_idx;
2126
2127        /* Set the SG table size based on ISP type */
2128        if (!IS_FWI2_CAPABLE(ha)) {
2129                if (IS_QLA2100(ha))
2130                        host->sg_tablesize = 32;
2131        } else {
2132                if (!IS_QLA82XX(ha))
2133                        host->sg_tablesize = QLA_SG_ALL;
2134        }
2135
2136        host->max_id = max_id;
2137        host->this_id = 255;
2138        host->cmd_per_lun = 3;
2139        host->unique_id = host->host_no;
2140        if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
2141                host->max_cmd_len = 32;
2142        else
2143                host->max_cmd_len = MAX_CMDSZ;
2144        host->max_channel = MAX_BUSES - 1;
2145        host->max_lun = ql2xmaxlun;
2146        host->transportt = qla2xxx_transport_template;
2147        sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2148
2149        /* Set up the irqs */
2150        ret = qla2x00_request_irqs(ha, rsp);
2151        if (ret)
2152                goto probe_init_failed;
2153
2154        pci_save_state(pdev);
2155
2156        /* Alloc arrays of request and response ring ptrs */
2157que_init:
2158        if (!qla2x00_alloc_queues(ha)) {
2159                qla_printk(KERN_WARNING, ha,
2160                "[ERROR] Failed to allocate memory for queue"
2161                " pointers\n");
2162                goto probe_init_failed;
2163        }
2164
2165        ha->rsp_q_map[0] = rsp;
2166        ha->req_q_map[0] = req;
2167        rsp->req = req;
2168        req->rsp = rsp;
2169        set_bit(0, ha->req_qid_map);
2170        set_bit(0, ha->rsp_qid_map);
2171        /* FWI2-capable only. */
2172        req->req_q_in = &ha->iobase->isp24.req_q_in;
2173        req->req_q_out = &ha->iobase->isp24.req_q_out;
2174        rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
2175        rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
2176        if (ha->mqenable) {
2177                req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
2178                req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
2179                rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
2180                rsp->rsp_q_out =  &ha->mqiobase->isp25mq.rsp_q_out;
2181        }
2182
2183        if (IS_QLA82XX(ha)) {
2184                req->req_q_out = &ha->iobase->isp82.req_q_out[0];
2185                rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
2186                rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2187        }
2188
2189        if (qla2x00_initialize_adapter(base_vha)) {
2190                qla_printk(KERN_WARNING, ha,
2191                    "Failed to initialize adapter\n");
2192
2193                DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
2194                    "Adapter flags %x.\n",
2195                    base_vha->host_no, base_vha->device_flags));
2196
2197                if (IS_QLA82XX(ha)) {
2198                        qla82xx_idc_lock(ha);
2199                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2200                                QLA82XX_DEV_FAILED);
2201                        qla82xx_idc_unlock(ha);
2202                        qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
2203                }
2204
2205                ret = -ENODEV;
2206                goto probe_failed;
2207        }
2208
2209        if (ha->mqenable) {
2210                if (qla25xx_setup_mode(base_vha)) {
2211                        qla_printk(KERN_WARNING, ha,
2212                                "Can't create queues, falling back to single"
2213                                " queue mode\n");
2214                        goto que_init;
2215                }
2216        }
2217
2218        if (ha->flags.running_gold_fw)
2219                goto skip_dpc;
2220
2221        /*
2222         * Startup the kernel thread for this host adapter
2223         */
2224        ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
2225                        "%s_dpc", base_vha->host_str);
2226        if (IS_ERR(ha->dpc_thread)) {
2227                qla_printk(KERN_WARNING, ha,
2228                    "Unable to start DPC thread!\n");
2229                ret = PTR_ERR(ha->dpc_thread);
2230                goto probe_failed;
2231        }
2232
2233skip_dpc:
2234        list_add_tail(&base_vha->list, &ha->vp_list);
2235        base_vha->host->irq = ha->pdev->irq;
2236
2237        /* Initialize the timer. */
2238        qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
2239
2240        DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2241            base_vha->host_no, ha));
2242
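        /*
         * Register T10 DIF/DIX protection support with the SCSI midlayer
         * when the firmware advertises it (fw_attributes BIT_4) and
         * ql2xenabledif is set.
         */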
2243        if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
2244                if (ha->fw_attributes & BIT_4) {
2245                        base_vha->flags.difdix_supported = 1;
2246                        DEBUG18(qla_printk(KERN_INFO, ha,
2247                            "Registering for DIF/DIX type 1 and 3"
2248                            " protection.\n"));
2249                        scsi_host_set_prot(host,
2250                            SHOST_DIF_TYPE1_PROTECTION
2251                            | SHOST_DIF_TYPE2_PROTECTION
2252                            | SHOST_DIF_TYPE3_PROTECTION
2253                            | SHOST_DIX_TYPE1_PROTECTION
2254                            | SHOST_DIX_TYPE2_PROTECTION
2255                            | SHOST_DIX_TYPE3_PROTECTION);
2256                        scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
2257                } else
2258                        base_vha->flags.difdix_supported = 0;
2259        }
2260
2261        ha->isp_ops->enable_intrs(ha);
2262
2263        ret = scsi_add_host(host, &pdev->dev);
2264        if (ret)
2265                goto probe_failed;
2266
2267        base_vha->flags.init_done = 1;
2268        base_vha->flags.online = 1;
2269
2270        scsi_scan_host(host);
2271
2272        qla2x00_alloc_sysfs_attr(base_vha);
2273
2274        qla2x00_init_host_attr(base_vha);
2275
2276        qla2x00_dfs_setup(base_vha);
2277
2278        qla_printk(KERN_INFO, ha, "\n"
2279            " QLogic Fibre Channel HBA Driver: %s\n"
2280            "  QLogic %s - %s\n"
2281            "  ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
2282            qla2x00_version_str, ha->model_number,
2283            ha->model_desc ? ha->model_desc : "", pdev->device,
2284            ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
2285            ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
2286            ha->isp_ops->fw_version_str(base_vha, fw_str));
2287
2288        return 0;
2289
2290probe_init_failed:
2291        qla2x00_free_req_que(ha, req);
2292        qla2x00_free_rsp_que(ha, rsp);
2293        ha->max_req_queues = ha->max_rsp_queues = 0;
2294
2295probe_failed:
2296        if (base_vha->timer_active)
2297                qla2x00_stop_timer(base_vha);
2298        base_vha->flags.online = 0;
2299        if (ha->dpc_thread) {
2300                struct task_struct *t = ha->dpc_thread;
2301
2302                ha->dpc_thread = NULL;
2303                kthread_stop(t);
2304        }
2305
2306        qla2x00_free_device(base_vha);
2307
2308        scsi_host_put(base_vha->host);
2309
2310probe_hw_failed:
2311        if (IS_QLA82XX(ha)) {
2312                qla82xx_idc_lock(ha);
2313                qla82xx_clear_drv_active(ha);
2314                qla82xx_idc_unlock(ha);
2315                iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2316                if (!ql2xdbwr)
2317                        iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
2318        } else {
2319                if (ha->iobase)
2320                        iounmap(ha->iobase);
2321        }
2322        pci_release_selected_regions(ha->pdev, ha->bars);
2323        kfree(ha);
2324        ha = NULL;
2325
2326probe_out:
2327        pci_disable_device(pdev);
2328        return ret;
2329}
2330
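/*
 * qla2x00_shutdown() - PCI shutdown handler.
 *
 * Quiesces the adapter without a full teardown: stops FCE/EFT tracing,
 * stops the firmware, disables interrupts, frees the IRQs and releases
 * the firmware dump buffers.
 */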
2331static void
2332qla2x00_shutdown(struct pci_dev *pdev)
2333{
2334        scsi_qla_host_t *vha;
2335        struct qla_hw_data  *ha;
2336
2337        vha = pci_get_drvdata(pdev);
2338        ha = vha->hw;
2339
2340        /* Turn-off FCE trace */
2341        if (ha->flags.fce_enabled) {
2342                qla2x00_disable_fce_trace(vha, NULL, NULL);
2343                ha->flags.fce_enabled = 0;
2344        }
2345
2346        /* Turn-off EFT trace */
2347        if (ha->eft)
2348                qla2x00_disable_eft_trace(vha);
2349
2350        /* Stop currently executing firmware. */
2351        qla2x00_try_to_stop_firmware(vha);
2352
2353        /* Take the adapter offline. */
2354        vha->flags.online = 0;
2355
2356        /* turn-off interrupts on the card */
2357        if (ha->interrupts_on) {
2358                vha->flags.init_done = 0;
2359                ha->isp_ops->disable_intrs(ha);
2360        }
2361
2362        qla2x00_free_irqs(vha);
2363
2364        qla2x00_free_fw_dump(ha);
2365}
2366
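/*
 * qla2x00_remove_one() - PCI remove handler.
 *
 * Tears the host down in roughly the reverse order of probe: terminates
 * any NPIV vports first, then stops the timer, workqueue and DPC thread,
 * removes sysfs/debugfs entries and the SCSI host, frees the device
 * resources, unmaps the register windows and releases the PCI regions.
 */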
2367static void
2368qla2x00_remove_one(struct pci_dev *pdev)
2369{
2370        scsi_qla_host_t *base_vha, *vha;
2371        struct qla_hw_data  *ha;
2372        unsigned long flags;
2373
2374        base_vha = pci_get_drvdata(pdev);
2375        ha = base_vha->hw;
2376
2377        mutex_lock(&ha->vport_lock);
2378        while (ha->cur_vport_count) {
2379                struct Scsi_Host *scsi_host;
2380
2381                spin_lock_irqsave(&ha->vport_slock, flags);
2382
2383                BUG_ON(base_vha->list.next == &ha->vp_list);
2384                /* This assumes first entry in ha->vp_list is always base vha */
2385                vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
2386                scsi_host = scsi_host_get(vha->host);
2387
2388                spin_unlock_irqrestore(&ha->vport_slock, flags);
2389                mutex_unlock(&ha->vport_lock);
2390
2391                fc_vport_terminate(vha->fc_vport);
2392                scsi_host_put(vha->host);
2393
2394                mutex_lock(&ha->vport_lock);
2395        }
2396        mutex_unlock(&ha->vport_lock);
2397
2398        set_bit(UNLOADING, &base_vha->dpc_flags);
2399
2400        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
2401
2402        qla2x00_dfs_remove(base_vha);
2403
2404        qla84xx_put_chip(base_vha);
2405
2406        /* Disable timer */
2407        if (base_vha->timer_active)
2408                qla2x00_stop_timer(base_vha);
2409
2410        base_vha->flags.online = 0;
2411
2412        /* Flush the work queue and remove it */
2413        if (ha->wq) {
2414                flush_workqueue(ha->wq);
2415                destroy_workqueue(ha->wq);
2416                ha->wq = NULL;
2417        }
2418
2419        /* Kill the kernel thread for this host */
2420        if (ha->dpc_thread) {
2421                struct task_struct *t = ha->dpc_thread;
2422
2423                /*
2424                 * qla2xxx_wake_dpc checks for ->dpc_thread
2425                 * so we need to zero it out.
2426                 */
2427                ha->dpc_thread = NULL;
2428                kthread_stop(t);
2429        }
2430
2431        qla2x00_free_sysfs_attr(base_vha);
2432
2433        fc_remove_host(base_vha->host);
2434
2435        scsi_remove_host(base_vha->host);
2436
2437        qla2x00_free_device(base_vha);
2438
2439        scsi_host_put(base_vha->host);
2440
2441        if (IS_QLA82XX(ha)) {
2442                qla82xx_idc_lock(ha);
2443                qla82xx_clear_drv_active(ha);
2444                qla82xx_idc_unlock(ha);
2445
2446                iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2447                if (!ql2xdbwr)
2448                        iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
2449        } else {
2450                if (ha->iobase)
2451                        iounmap(ha->iobase);
2452
2453                if (ha->mqiobase)
2454                        iounmap(ha->mqiobase);
2455        }
2456
2457        pci_release_selected_regions(ha->pdev, ha->bars);
2458        kfree(ha);
2459        ha = NULL;
2460
2461        pci_disable_pcie_error_reporting(pdev);
2462
2463        pci_disable_device(pdev);
2464        pci_set_drvdata(pdev, NULL);
2465}
2466
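/*
 * qla2x00_free_device() - release the per-host resources.
 *
 * Aborts outstanding commands, stops the timer and DPC thread, deletes
 * the extra queues, stops tracing and the firmware, disables interrupts,
 * then frees the IRQs, fcports, adapter memory and queue structures.
 * Used by both the remove path and the probe error path.
 */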
2467static void
2468qla2x00_free_device(scsi_qla_host_t *vha)
2469{
2470        struct qla_hw_data *ha = vha->hw;
2471
2472        qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
2473
2474        /* Disable timer */
2475        if (vha->timer_active)
2476                qla2x00_stop_timer(vha);
2477
2478        /* Kill the kernel thread for this host */
2479        if (ha->dpc_thread) {
2480                struct task_struct *t = ha->dpc_thread;
2481
2482                /*
2483                 * qla2xxx_wake_dpc checks for ->dpc_thread
2484                 * so we need to zero it out.
2485                 */
2486                ha->dpc_thread = NULL;
2487                kthread_stop(t);
2488        }
2489
2490        qla25xx_delete_queues(vha);
2491
2492        if (ha->flags.fce_enabled)
2493                qla2x00_disable_fce_trace(vha, NULL, NULL);
2494
2495        if (ha->eft)
2496                qla2x00_disable_eft_trace(vha);
2497
2498        /* Stop currently executing firmware. */
2499        qla2x00_try_to_stop_firmware(vha);
2500
2501        vha->flags.online = 0;
2502
2503        /* turn-off interrupts on the card */
2504        if (ha->interrupts_on) {
2505                vha->flags.init_done = 0;
2506                ha->isp_ops->disable_intrs(ha);
2507        }
2508
2509        qla2x00_free_irqs(vha);
2510
2511        qla2x00_free_fcports(vha);
2512
2513        qla2x00_mem_free(ha);
2514
2515        qla2x00_free_queues(ha);
2516}
2517
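/*
 * qla2x00_free_fcports() - free every fc_port hanging off the host's
 * vp_fcports list.
 */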
2518void qla2x00_free_fcports(struct scsi_qla_host *vha)
2519{
2520        fc_port_t *fcport, *tfcport;
2521
2522        list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
2523                list_del(&fcport->list);
2524                kfree(fcport);
2525                fcport = NULL;
2526        }
2527}
2528
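/*
 * qla2x00_schedule_rport_del() - remove the fc_rport backing an fcport.
 *
 * With @defer set, the rport is parked on fcport->drport and the actual
 * fc_remote_port_delete() is left to the DPC thread (FCPORT_UPDATE_NEEDED);
 * otherwise the rport is deleted immediately.
 */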
2529static inline void
2530qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2531    int defer)
2532{
2533        struct fc_rport *rport;
2534        scsi_qla_host_t *base_vha;
2535        unsigned long flags;
2536
2537        if (!fcport->rport)
2538                return;
2539
2540        rport = fcport->rport;
2541        if (defer) {
2542                base_vha = pci_get_drvdata(vha->hw->pdev);
2543                spin_lock_irqsave(vha->host->host_lock, flags);
2544                fcport->drport = rport;
2545                spin_unlock_irqrestore(vha->host->host_lock, flags);
2546                set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2547                qla2xxx_wake_dpc(base_vha);
2548        } else
2549                fc_remote_port_delete(rport);
2550}
2551
2552/*
2553 * qla2x00_mark_device_lost - updates fcport state when a device goes offline.
2554 *
2555 * Input: vha = adapter block pointer.  fcport = port structure pointer.
2556 *
2557 * Return: None.
2558 *
2559 * Context:
2560 */
2561void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2562    int do_login, int defer)
2563{
2564        if (atomic_read(&fcport->state) == FCS_ONLINE &&
2565            vha->vp_idx == fcport->vp_idx) {
2566                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2567                qla2x00_schedule_rport_del(vha, fcport, defer);
2568        }
2569        /*
2570         * We may need to retry the login, so don't change the state of the
2571         * port but do the retries.
2572         */
2573        if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
2574                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2575
2576        if (!do_login)
2577                return;
2578
2579        if (fcport->login_retry == 0) {
2580                fcport->login_retry = vha->hw->login_retry_count;
2581                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2582
2583                DEBUG(printk("scsi(%ld): Port login retry: "
2584                    "%02x%02x%02x%02x%02x%02x%02x%02x, "
2585                    "id = 0x%04x retry cnt=%d\n",
2586                    vha->host_no,
2587                    fcport->port_name[0],
2588                    fcport->port_name[1],
2589                    fcport->port_name[2],
2590                    fcport->port_name[3],
2591                    fcport->port_name[4],
2592                    fcport->port_name[5],
2593                    fcport->port_name[6],
2594                    fcport->port_name[7],
2595                    fcport->loop_id,
2596                    fcport->login_retry));
2597        }
2598}
2599
2600/*
2601 * qla2x00_mark_all_devices_lost
2602 *      Marks all fcports on the (physical or virtual) host as lost.
2603 *
2604 * Input:
2605 *      vha = adapter block pointer.
2606 *      defer = flag requesting deferred rport removal via the DPC thread.
2607 *
2608 * Return:
2609 *      None.
2610 *
2611 * Context:
2612 */
2613void
2614qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2615{
2616        fc_port_t *fcport;
2617
2618        list_for_each_entry(fcport, &vha->vp_fcports, list) {
2619                if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
2620                        continue;
2621
2622                /*
2623                 * No point in marking the device as lost, if the device is
2624                 * already DEAD.
2625                 */
2626                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2627                        continue;
2628                if (atomic_read(&fcport->state) == FCS_ONLINE) {
2629                        qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2630                        if (defer)
2631                                qla2x00_schedule_rport_del(vha, fcport, defer);
2632                        else if (vha->vp_idx == fcport->vp_idx)
2633                                qla2x00_schedule_rport_del(vha, fcport, defer);
2634                }
2635        }
2636}
2637
2638/*
2639 * qla2x00_mem_alloc
2640 *      Allocates adapter memory.
2641 *
2642 * Returns:
2643 *      1       = success.
2644 *      -ENOMEM = failure.
2645 */
2646static int
2647qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2648        struct req_que **req, struct rsp_que **rsp)
2649{
2650        char    name[16];
2651
2652        ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
2653                &ha->init_cb_dma, GFP_KERNEL);
2654        if (!ha->init_cb)
2655                goto fail;
2656
2657        ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2658                &ha->gid_list_dma, GFP_KERNEL);
2659        if (!ha->gid_list)
2660                goto fail_free_init_cb;
2661
2662        ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2663        if (!ha->srb_mempool)
2664                goto fail_free_gid_list;
2665
2666        if (IS_QLA82XX(ha)) {
2667                /* Allocate cache for CT6 Ctx. */
2668                if (!ctx_cachep) {
2669                        ctx_cachep = kmem_cache_create("qla2xxx_ctx",
2670                                sizeof(struct ct6_dsd), 0,
2671                                SLAB_HWCACHE_ALIGN, NULL);
2672                        if (!ctx_cachep)
2673                                goto fail_free_gid_list;
2674                }
2675                ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
2676                        ctx_cachep);
2677                if (!ha->ctx_mempool)
2678                        goto fail_free_srb_mempool;
2679        }
2680
2681        /* Get memory for cached NVRAM */
2682        ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2683        if (!ha->nvram)
2684                goto fail_free_ctx_mempool;
2685
2686        snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2687                ha->pdev->device);
2688        ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2689                DMA_POOL_SIZE, 8, 0);
2690        if (!ha->s_dma_pool)
2691                goto fail_free_nvram;
2692
2693        if (IS_QLA82XX(ha) || ql2xenabledif) {
2694                ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2695                        DSD_LIST_DMA_POOL_SIZE, 8, 0);
2696                if (!ha->dl_dma_pool) {
2697                        qla_printk(KERN_WARNING, ha,
2698                            "Memory Allocation failed - dl_dma_pool\n");
2699                        goto fail_s_dma_pool;
2700                }
2701
2702                ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2703                        FCP_CMND_DMA_POOL_SIZE, 8, 0);
2704                if (!ha->fcp_cmnd_dma_pool) {
2705                        qla_printk(KERN_WARNING, ha,
2706                            "Memory Allocation failed - fcp_cmnd_dma_pool\n");
2707                        goto fail_dl_dma_pool;
2708                }
2709        }
2710
2711        /* Allocate memory for SNS commands */
2712        if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2713                /* Get consistent memory allocated for SNS commands */
2714                ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2715                sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2716                if (!ha->sns_cmd)
2717                        goto fail_dma_pool;
2718        } else {
2719                /* Get consistent memory allocated for MS IOCB */
2720                ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2721                        &ha->ms_iocb_dma);
2722                if (!ha->ms_iocb)
2723                        goto fail_dma_pool;
2724                /* Get consistent memory allocated for CT SNS commands */
2725                ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2726                        sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2727                if (!ha->ct_sns)
2728                        goto fail_free_ms_iocb;
2729        }
2730
2731        /* Allocate memory for request ring */
2732        *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2733        if (!*req) {
2734                DEBUG(printk("Unable to allocate memory for req\n"));
2735                goto fail_req;
2736        }
2737        (*req)->length = req_len;
2738        (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2739                ((*req)->length + 1) * sizeof(request_t),
2740                &(*req)->dma, GFP_KERNEL);
2741        if (!(*req)->ring) {
2742                DEBUG(printk("Unable to allocate memory for req_ring\n"));
2743                goto fail_req_ring;
2744        }
2745        /* Allocate memory for response ring */
2746        *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2747        if (!*rsp) {
2748                qla_printk(KERN_WARNING, ha,
2749                        "Unable to allocate memory for rsp\n");
2750                goto fail_rsp;
2751        }
2752        (*rsp)->hw = ha;
2753        (*rsp)->length = rsp_len;
2754        (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2755                ((*rsp)->length + 1) * sizeof(response_t),
2756                &(*rsp)->dma, GFP_KERNEL);
2757        if (!(*rsp)->ring) {
2758                qla_printk(KERN_WARNING, ha,
2759                        "Unable to allocate memory for rsp_ring\n");
2760                goto fail_rsp_ring;
2761        }
2762        (*req)->rsp = *rsp;
2763        (*rsp)->req = *req;
2764        /* Allocate memory for NVRAM data for vports */
2765        if (ha->nvram_npiv_size) {
2766                ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2767                                        ha->nvram_npiv_size, GFP_KERNEL);
2768                if (!ha->npiv_info) {
2769                        qla_printk(KERN_WARNING, ha,
2770                                "Unable to allocate memory for npiv info\n");
2771                        goto fail_npiv_info;
2772                }
2773        } else
2774                ha->npiv_info = NULL;
2775
2776        /* Get consistent memory allocated for EX-INIT-CB. */
2777        if (IS_QLA8XXX_TYPE(ha)) {
2778                ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2779                    &ha->ex_init_cb_dma);
2780                if (!ha->ex_init_cb)
2781                        goto fail_ex_init_cb;
2782        }
2783
2784        INIT_LIST_HEAD(&ha->gbl_dsd_list);
2785
2786        /* Get consistent memory allocated for Async Port-Database. */
2787        if (!IS_FWI2_CAPABLE(ha)) {
2788                ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2789                        &ha->async_pd_dma);
2790                if (!ha->async_pd)
2791                        goto fail_async_pd;
2792        }
2793
2794        INIT_LIST_HEAD(&ha->vp_list);
2795        return 1;
2796
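/*
 * Error unwind: each label below releases what was allocated before the
 * corresponding failure point, in reverse order of allocation.
 */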
2797fail_async_pd:
2798        dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
2799fail_ex_init_cb:
2800        kfree(ha->npiv_info);
2801fail_npiv_info:
2802        dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2803                sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2804        (*rsp)->ring = NULL;
2805        (*rsp)->dma = 0;
2806fail_rsp_ring:
2807        kfree(*rsp);
2808fail_rsp:
2809        dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2810                sizeof(request_t), (*req)->ring, (*req)->dma);
2811        (*req)->ring = NULL;
2812        (*req)->dma = 0;
2813fail_req_ring:
2814        kfree(*req);
2815fail_req:
2816        dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2817                ha->ct_sns, ha->ct_sns_dma);
2818        ha->ct_sns = NULL;
2819        ha->ct_sns_dma = 0;
2820fail_free_ms_iocb:
2821        dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2822        ha->ms_iocb = NULL;
2823        ha->ms_iocb_dma = 0;
2824fail_dma_pool:
2825        if (IS_QLA82XX(ha) || ql2xenabledif) {
2826                dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2827                ha->fcp_cmnd_dma_pool = NULL;
2828        }
2829fail_dl_dma_pool:
2830        if (IS_QLA82XX(ha) || ql2xenabledif) {
2831                dma_pool_destroy(ha->dl_dma_pool);
2832                ha->dl_dma_pool = NULL;
2833        }
2834fail_s_dma_pool:
2835        dma_pool_destroy(ha->s_dma_pool);
2836        ha->s_dma_pool = NULL;
2837fail_free_nvram:
2838        kfree(ha->nvram);
2839        ha->nvram = NULL;
2840fail_free_ctx_mempool:
2841        mempool_destroy(ha->ctx_mempool);
2842        ha->ctx_mempool = NULL;
2843fail_free_srb_mempool:
2844        mempool_destroy(ha->srb_mempool);
2845        ha->srb_mempool = NULL;
2846fail_free_gid_list:
2847        dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2848        ha->gid_list_dma);
2849        ha->gid_list = NULL;
2850        ha->gid_list_dma = 0;
2851fail_free_init_cb:
2852        dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2853        ha->init_cb_dma);
2854        ha->init_cb = NULL;
2855        ha->init_cb_dma = 0;
2856fail:
2857        DEBUG(printk("%s: Memory allocation failure\n", __func__));
2858        return -ENOMEM;
2859}
2860
2861/*
2862 * qla2x00_free_fw_dump
2863 *      Frees the firmware dump resources (FCE, EFT and the dump buffer).
2864 *
2865 * Input:
2866 *      ha = adapter block pointer.
2867 */
2868static void
2869qla2x00_free_fw_dump(struct qla_hw_data *ha)
2870{
2871        if (ha->fce)
2872                dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2873                    ha->fce_dma);
2874
2875        if (ha->fw_dump) {
2876                if (ha->eft)
2877                        dma_free_coherent(&ha->pdev->dev,
2878                            ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2879                vfree(ha->fw_dump);
2880        }
2881        ha->fce = NULL;
2882        ha->fce_dma = 0;
2883        ha->eft = NULL;
2884        ha->eft_dma = 0;
2885        ha->fw_dump = NULL;
2886        ha->fw_dumped = 0;
2887        ha->fw_dump_reading = 0;
2888}
2889
2890/*
2891 * qla2x00_mem_free
2892 *      Frees all adapter allocated memory.
2893 *
2894 * Input:
2895 *      ha = adapter block pointer.
2896 */
2897static void
2898qla2x00_mem_free(struct qla_hw_data *ha)
2899{
2900        qla2x00_free_fw_dump(ha);
2901
2902        if (ha->srb_mempool)
2903                mempool_destroy(ha->srb_mempool);
2904
2905        if (ha->dcbx_tlv)
2906                dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2907                    ha->dcbx_tlv, ha->dcbx_tlv_dma);
2908
2909        if (ha->xgmac_data)
2910                dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2911                    ha->xgmac_data, ha->xgmac_data_dma);
2912
2913        if (ha->sns_cmd)
2914                dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2915                ha->sns_cmd, ha->sns_cmd_dma);
2916
2917        if (ha->ct_sns)
2918                dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2919                ha->ct_sns, ha->ct_sns_dma);
2920
2921        if (ha->sfp_data)
2922                dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
2923
2924        if (ha->edc_data)
2925                dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
2926
2927        if (ha->ms_iocb)
2928                dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2929
2930        if (ha->ex_init_cb)
2931                dma_pool_free(ha->s_dma_pool,
2932                        ha->ex_init_cb, ha->ex_init_cb_dma);
2933
2934        if (ha->async_pd)
2935                dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
2936
2937        if (ha->s_dma_pool)
2938                dma_pool_destroy(ha->s_dma_pool);
2939
2940        if (ha->gid_list)
2941                dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2942                ha->gid_list_dma);
2943
2944        if (IS_QLA82XX(ha)) {
2945                if (!list_empty(&ha->gbl_dsd_list)) {
2946                        struct dsd_dma *dsd_ptr, *tdsd_ptr;
2947
2948                        /* clean up the previously allocated DSD pool */
2949                        list_for_each_entry_safe(dsd_ptr,
2950                                tdsd_ptr, &ha->gbl_dsd_list, list) {
2951                                dma_pool_free(ha->dl_dma_pool,
2952                                dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
2953                                list_del(&dsd_ptr->list);
2954                                kfree(dsd_ptr);
2955                        }
2956                }
2957        }
2958
2959        if (ha->dl_dma_pool)
2960                dma_pool_destroy(ha->dl_dma_pool);
2961
2962        if (ha->fcp_cmnd_dma_pool)
2963                dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2964
2965        if (ha->ctx_mempool)
2966                mempool_destroy(ha->ctx_mempool);
2967
2968        if (ha->init_cb)
2969                dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2970                        ha->init_cb, ha->init_cb_dma);
2971        vfree(ha->optrom_buffer);
2972        kfree(ha->nvram);
2973        kfree(ha->npiv_info);
2974
2975        ha->srb_mempool = NULL;
2976        ha->ctx_mempool = NULL;
2977        ha->sns_cmd = NULL;
2978        ha->sns_cmd_dma = 0;
2979        ha->ct_sns = NULL;
2980        ha->ct_sns_dma = 0;
2981        ha->ms_iocb = NULL;
2982        ha->ms_iocb_dma = 0;
2983        ha->init_cb = NULL;
2984        ha->init_cb_dma = 0;
2985        ha->ex_init_cb = NULL;
2986        ha->ex_init_cb_dma = 0;
2987        ha->async_pd = NULL;
2988        ha->async_pd_dma = 0;
2989
2990        ha->s_dma_pool = NULL;
2991        ha->dl_dma_pool = NULL;
2992        ha->fcp_cmnd_dma_pool = NULL;
2993
2994        ha->gid_list = NULL;
2995        ha->gid_list_dma = 0;
2996}
2997
2998struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2999                                                struct qla_hw_data *ha)
3000{
3001        struct Scsi_Host *host;
3002        struct scsi_qla_host *vha = NULL;
3003
3004        host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
3005        if (host == NULL) {
3006                printk(KERN_WARNING
3007                "qla2xxx: Couldn't allocate host from scsi layer!\n");
3008                goto fail;
3009        }
3010
3011        /* Clear our data area */
3012        vha = shost_priv(host);
3013        memset(vha, 0, sizeof(scsi_qla_host_t));
3014
3015        vha->host = host;
3016        vha->host_no = host->host_no;
3017        vha->hw = ha;
3018
3019        INIT_LIST_HEAD(&vha->vp_fcports);
3020        INIT_LIST_HEAD(&vha->work_list);
3021        INIT_LIST_HEAD(&vha->list);
3022
3023        spin_lock_init(&vha->work_lock);
3024
3025        sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3026        return vha;
3027
3028fail:
3029        return vha;
3030}
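
/*
 * Note on the allocation above: scsi_host_alloc() reserves the
 * scsi_qla_host_t private area together with the Scsi_Host, and shost_priv()
 * returns a pointer into that allocation.  The private area is therefore
 * freed implicitly when the last reference to the Scsi_Host is dropped via
 * scsi_host_put(), which is why the driver never frees vha directly.
 */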
3031
3032static struct qla_work_evt *
3033qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
3034{
3035        struct qla_work_evt *e;
3036        uint8_t bail;
3037
3038        QLA_VHA_MARK_BUSY(vha, bail);
3039        if (bail)
3040                return NULL;
3041
3042        e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
3043        if (!e) {
3044                QLA_VHA_MARK_NOT_BUSY(vha);
3045                return NULL;
3046        }
3047
3048        INIT_LIST_HEAD(&e->list);
3049        e->type = type;
3050        e->flags = QLA_EVT_FLAG_FREE;
3051        return e;
3052}
3053
3054static int
3055qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
3056{
3057        unsigned long flags;
3058
3059        spin_lock_irqsave(&vha->work_lock, flags);
3060        list_add_tail(&e->list, &vha->work_list);
3061        spin_unlock_irqrestore(&vha->work_lock, flags);
3062        qla2xxx_wake_dpc(vha);
3063
3064        return QLA_SUCCESS;
3065}
3066
3067int
3068qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
3069    u32 data)
3070{
3071        struct qla_work_evt *e;
3072
3073        e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
3074        if (!e)
3075                return QLA_FUNCTION_FAILED;
3076
3077        e->u.aen.code = code;
3078        e->u.aen.data = data;
3079        return qla2x00_post_work(vha, e);
3080}
3081
3082int
3083qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
3084{
3085        struct qla_work_evt *e;
3086
3087        e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
3088        if (!e)
3089                return QLA_FUNCTION_FAILED;
3090
3091        memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
3092        return qla2x00_post_work(vha, e);
3093}
3094
3095#define qla2x00_post_async_work(name, type)     \
3096int qla2x00_post_async_##name##_work(           \
3097    struct scsi_qla_host *vha,                  \
3098    fc_port_t *fcport, uint16_t *data)          \
3099{                                               \
3100        struct qla_work_evt *e;                 \
3101                                                \
3102        e = qla2x00_alloc_work(vha, type);      \
3103        if (!e)                                 \
3104                return QLA_FUNCTION_FAILED;     \
3105                                                \
3106        e->u.logio.fcport = fcport;             \
3107        if (data) {                             \
3108                e->u.logio.data[0] = data[0];   \
3109                e->u.logio.data[1] = data[1];   \
3110        }                                       \
3111        return qla2x00_post_work(vha, e);       \
3112}
3113
3114qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
3115qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
3116qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
3117qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
3118qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
3119qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
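
/*
 * For reference, the first instantiation above,
 * qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN), expands to roughly
 * the following function (illustrative expansion only):
 *
 *      int qla2x00_post_async_login_work(struct scsi_qla_host *vha,
 *          fc_port_t *fcport, uint16_t *data)
 *      {
 *              struct qla_work_evt *e;
 *
 *              e = qla2x00_alloc_work(vha, QLA_EVT_ASYNC_LOGIN);
 *              if (!e)
 *                      return QLA_FUNCTION_FAILED;
 *
 *              e->u.logio.fcport = fcport;
 *              if (data) {
 *                      e->u.logio.data[0] = data[0];
 *                      e->u.logio.data[1] = data[1];
 *              }
 *              return qla2x00_post_work(vha, e);
 *      }
 */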
3120
3121int
3122qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
3123{
3124        struct qla_work_evt *e;
3125
3126        e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
3127        if (!e)
3128                return QLA_FUNCTION_FAILED;
3129
3130        e->u.uevent.code = code;
3131        return qla2x00_post_work(vha, e);
3132}
3133
3134static void
3135qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
3136{
3137        char event_string[40];
3138        char *envp[] = { event_string, NULL };
3139
3140        switch (code) {
3141        case QLA_UEVENT_CODE_FW_DUMP:
3142                snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
3143                    vha->host_no);
3144                break;
3145        default:
3146                /* do nothing */
3147                break;
3148        }
3149        kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
3150}
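
/*
 * kobject_uevent_env() above delivers the "FW_DUMP=<host_no>" variable with
 * a KOBJ_CHANGE uevent on the HBA's PCI device, so a udev rule or other
 * userspace listener can react to a firmware dump without polling sysfs.
 */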
3151
3152void
3153qla2x00_do_work(struct scsi_qla_host *vha)
3154{
3155        struct qla_work_evt *e, *tmp;
3156        unsigned long flags;
3157        LIST_HEAD(work);
3158
3159        spin_lock_irqsave(&vha->work_lock, flags);
3160        list_splice_init(&vha->work_list, &work);
3161        spin_unlock_irqrestore(&vha->work_lock, flags);
3162
3163        list_for_each_entry_safe(e, tmp, &work, list) {
3164                list_del_init(&e->list);
3165
3166                switch (e->type) {
3167                case QLA_EVT_AEN:
3168                        fc_host_post_event(vha->host, fc_get_event_number(),
3169                            e->u.aen.code, e->u.aen.data);
3170                        break;
3171                case QLA_EVT_IDC_ACK:
3172                        qla81xx_idc_ack(vha, e->u.idc_ack.mb);
3173                        break;
3174                case QLA_EVT_ASYNC_LOGIN:
3175                        qla2x00_async_login(vha, e->u.logio.fcport,
3176                            e->u.logio.data);
3177                        break;
3178                case QLA_EVT_ASYNC_LOGIN_DONE:
3179                        qla2x00_async_login_done(vha, e->u.logio.fcport,
3180                            e->u.logio.data);
3181                        break;
3182                case QLA_EVT_ASYNC_LOGOUT:
3183                        qla2x00_async_logout(vha, e->u.logio.fcport);
3184                        break;
3185                case QLA_EVT_ASYNC_LOGOUT_DONE:
3186                        qla2x00_async_logout_done(vha, e->u.logio.fcport,
3187                            e->u.logio.data);
3188                        break;
3189                case QLA_EVT_ASYNC_ADISC:
3190                        qla2x00_async_adisc(vha, e->u.logio.fcport,
3191                            e->u.logio.data);
3192                        break;
3193                case QLA_EVT_ASYNC_ADISC_DONE:
3194                        qla2x00_async_adisc_done(vha, e->u.logio.fcport,
3195                            e->u.logio.data);
3196                        break;
3197                case QLA_EVT_UEVENT:
3198                        qla2x00_uevent_emit(vha, e->u.uevent.code);
3199                        break;
3200                }
3201                if (e->flags & QLA_EVT_FLAG_FREE)
3202                        kfree(e);
3203
3204                /* For each work completed decrement vha ref count */
3205                QLA_VHA_MARK_NOT_BUSY(vha);
3206        }
3207}
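
/*
 * Note the pattern above: the pending events are spliced onto a private list
 * while holding work_lock and then processed with the lock dropped, so the
 * atomic contexts that post work (see qla2x00_post_work() and the
 * qla2x00_post_*_work() helpers) are never serialized behind the handlers.
 * Each processed event also drops the vha busy reference taken in
 * qla2x00_alloc_work(), keeping the host pinned while work is outstanding.
 */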
3208
3209/* Relogin to all the fcports of a vport
3210 * Context: dpc thread
3211 */
3212void qla2x00_relogin(struct scsi_qla_host *vha)
3213{
3214        fc_port_t       *fcport;
3215        int status;
3216        uint16_t        next_loopid = 0;
3217        struct qla_hw_data *ha = vha->hw;
3218        uint16_t data[2];
3219
3220        list_for_each_entry(fcport, &vha->vp_fcports, list) {
3221        /*
3222         * If the port is not ONLINE then try to login
3223         * to it if we haven't run out of retries.
3224         */
3225                if (atomic_read(&fcport->state) != FCS_ONLINE &&
3226                    fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
3227                        fcport->login_retry--;
3228                        if (fcport->flags & FCF_FABRIC_DEVICE) {
3229                                if (fcport->flags & FCF_FCP2_DEVICE)
3230                                        ha->isp_ops->fabric_logout(vha,
3231                                                        fcport->loop_id,
3232                                                        fcport->d_id.b.domain,
3233                                                        fcport->d_id.b.area,
3234                                                        fcport->d_id.b.al_pa);
3235
3236                                if (fcport->loop_id == FC_NO_LOOP_ID) {
3237                                        fcport->loop_id = next_loopid =
3238                                            ha->min_external_loopid;
3239                                        status = qla2x00_find_new_loop_id(
3240                                            vha, fcport);
3241                                        if (status != QLA_SUCCESS) {
3242                                                /* Ran out of IDs to use */
3243                                                break;
3244                                        }
3245                                }
3246
3247                                if (IS_ALOGIO_CAPABLE(ha)) {
3248                                        fcport->flags |= FCF_ASYNC_SENT;
3249                                        data[0] = 0;
3250                                        data[1] = QLA_LOGIO_LOGIN_RETRIED;
3251                                        status = qla2x00_post_async_login_work(
3252                                            vha, fcport, data);
3253                                        if (status == QLA_SUCCESS)
3254                                                continue;
3255                                        /* Attempt a retry. */
3256                                        status = 1;
3257                                } else
3258                                        status = qla2x00_fabric_login(vha,
3259                                            fcport, &next_loopid);
3260                        } else
3261                                status = qla2x00_local_device_login(vha,
3262                                                                fcport);
3263
3264                        if (status == QLA_SUCCESS) {
3265                                fcport->old_loop_id = fcport->loop_id;
3266
3267                                DEBUG(printk("scsi(%ld): port login OK: logged "
3268                                "in ID 0x%x\n", vha->host_no, fcport->loop_id));
3269
3270                                qla2x00_update_fcport(vha, fcport);
3271
3272                        } else if (status == 1) {
3273                                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3274                                /* retry the login again */
3275                                DEBUG(printk("scsi(%ld): Retrying"
3276                                " %d login again loop_id 0x%x\n",
3277                                vha->host_no, fcport->login_retry,
3278                                                fcport->loop_id));
3279                        } else {
3280                                fcport->login_retry = 0;
3281                        }
3282
3283                        if (fcport->login_retry == 0 && status != QLA_SUCCESS)
3284                                fcport->loop_id = FC_NO_LOOP_ID;
3285                }
3286                if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3287                        break;
3288        }
3289}
3290
3291/**************************************************************************
3292* qla2x00_do_dpc
3293*   This kernel thread is a task that is scheduled by the interrupt handler
3294*   to perform the background processing for interrupts.
3295*
3296* Notes:
3297* This task always runs in the context of a kernel thread.  It
3298* is kicked off by the driver's detect code and starts up
3299* one per adapter. It immediately goes to sleep and waits for
3300* some fibre event.  When either the interrupt handler or
3301* the timer routine detects an event, it sets one of the task
3302* bits and then wakes us up.
3303**************************************************************************/
3304static int
3305qla2x00_do_dpc(void *data)
3306{
3307        int             rval;
3308        scsi_qla_host_t *base_vha;
3309        struct qla_hw_data *ha;
3310
3311        ha = (struct qla_hw_data *)data;
3312        base_vha = pci_get_drvdata(ha->pdev);
3313
3314        set_user_nice(current, -20);
3315
3316        set_current_state(TASK_INTERRUPTIBLE);
3317        while (!kthread_should_stop()) {
3318                DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
3319
3320                schedule();
3321                __set_current_state(TASK_RUNNING);
3322
3323                DEBUG3(printk("qla2x00: DPC handler waking up\n"));
3324
3325                /* Initialization not yet finished. Don't do anything yet. */
3326                if (!base_vha->flags.init_done)
3327                        continue;
3328
3329                if (ha->flags.eeh_busy) {
3330                        DEBUG17(qla_printk(KERN_WARNING, ha,
3331                            "qla2x00_do_dpc: dpc_flags: %lx\n",
3332                            base_vha->dpc_flags));
3333                        continue;
3334                }
3335
3336                DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
3337
3338                ha->dpc_active = 1;
3339
3340                if (ha->flags.mbox_busy) {
3341                        ha->dpc_active = 0;
3342                        continue;
3343                }
3344
3345                qla2x00_do_work(base_vha);
3346
3347                if (IS_QLA82XX(ha)) {
3348                        if (test_and_clear_bit(ISP_UNRECOVERABLE,
3349                                &base_vha->dpc_flags)) {
3350                                qla82xx_idc_lock(ha);
3351                                qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3352                                        QLA82XX_DEV_FAILED);
3353                                qla82xx_idc_unlock(ha);
3354                                qla_printk(KERN_INFO, ha,
3355                                        "HW State: FAILED\n");
3356                                qla82xx_device_state_handler(base_vha);
3357                                continue;
3358                        }
3359
3360                        if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3361                                &base_vha->dpc_flags)) {
3362
3363                                DEBUG(printk(KERN_INFO
3364                                        "scsi(%ld): dpc: sched "
3365                                        "qla82xx_fcoe_ctx_reset ha = %p\n",
3366                                        base_vha->host_no, ha));
3367                                if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3368                                        &base_vha->dpc_flags))) {
3369                                        if (qla82xx_fcoe_ctx_reset(base_vha)) {
3370                                                /* FCoE-ctx reset failed.
3371                                                 * Escalate to chip-reset
3372                                                 */
3373                                                set_bit(ISP_ABORT_NEEDED,
3374                                                        &base_vha->dpc_flags);
3375                                        }
3376                                        clear_bit(ABORT_ISP_ACTIVE,
3377                                                &base_vha->dpc_flags);
3378                                }
3379
3380                                DEBUG(printk("scsi(%ld): dpc:"
3381                                        " qla82xx_fcoe_ctx_reset end\n",
3382                                        base_vha->host_no));
3383                        }
3384                }
3385
3386                if (test_and_clear_bit(ISP_ABORT_NEEDED,
3387                                                &base_vha->dpc_flags)) {
3388
3389                        DEBUG(printk("scsi(%ld): dpc: sched "
3390                            "qla2x00_abort_isp ha = %p\n",
3391                            base_vha->host_no, ha));
3392                        if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3393                            &base_vha->dpc_flags))) {
3394
3395                                if (ha->isp_ops->abort_isp(base_vha)) {
3396                                        /* failed. retry later */
3397                                        set_bit(ISP_ABORT_NEEDED,
3398                                            &base_vha->dpc_flags);
3399                                }
3400                                clear_bit(ABORT_ISP_ACTIVE,
3401                                                &base_vha->dpc_flags);
3402                        }
3403
3404                        DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
3405                            base_vha->host_no));
3406                }
3407
3408                if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
3409                        qla2x00_update_fcports(base_vha);
3410                        clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3411                }
3412
3413                if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3414                        DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched "
3415                            "qla2x00_quiesce_needed ha = %p\n",
3416                            base_vha->host_no, ha));
3417                        qla82xx_device_state_handler(base_vha);
3418                        clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
3419                        if (!ha->flags.quiesce_owner) {
3420                                qla2x00_perform_loop_resync(base_vha);
3421
3422                                qla82xx_idc_lock(ha);
3423                                qla82xx_clear_qsnt_ready(base_vha);
3424                                qla82xx_idc_unlock(ha);
3425                        }
3426                }
3427
3428                if (test_and_clear_bit(RESET_MARKER_NEEDED,
3429                                                        &base_vha->dpc_flags) &&
3430                    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
3431
3432                        DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
3433                            base_vha->host_no));
3434
3435                        qla2x00_rst_aen(base_vha);
3436                        clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
3437                }
3438
3439                /* Retry each device up to login retry count */
3440                if ((test_and_clear_bit(RELOGIN_NEEDED,
3441                                                &base_vha->dpc_flags)) &&
3442                    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
3443                    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
3444
3445                        DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
3446                                        base_vha->host_no));
3447                        qla2x00_relogin(base_vha);
3448
3449                        DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
3450                            base_vha->host_no));
3451                }
3452
3453                if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
3454                                                        &base_vha->dpc_flags)) {
3455
3456                        DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
3457                                base_vha->host_no));
3458
3459                        if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
3460                            &base_vha->dpc_flags))) {
3461
3462                                rval = qla2x00_loop_resync(base_vha);
3463
3464                                clear_bit(LOOP_RESYNC_ACTIVE,
3465                                                &base_vha->dpc_flags);
3466                        }
3467
3468                        DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
3469                            base_vha->host_no));
3470                }
3471
3472                if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
3473                    atomic_read(&base_vha->loop_state) == LOOP_READY) {
3474                        clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
3475                        qla2xxx_flash_npiv_conf(base_vha);
3476                }
3477
3478                if (!ha->interrupts_on)
3479                        ha->isp_ops->enable_intrs(ha);
3480
3481                if (test_and_clear_bit(BEACON_BLINK_NEEDED,
3482                                        &base_vha->dpc_flags))
3483                        ha->isp_ops->beacon_blink(base_vha);
3484
3485                qla2x00_do_dpc_all_vps(base_vha);
3486
3487                ha->dpc_active = 0;
3488                set_current_state(TASK_INTERRUPTIBLE);
3489        } /* End of while (!kthread_should_stop()) */
3490        __set_current_state(TASK_RUNNING);
3491
3492        DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
3493
3494        /*
3495         * Make sure that nobody tries to wake us up again.
3496         */
3497        ha->dpc_active = 0;
3498
3499        /* Cleanup any residual CTX SRBs. */
3500        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
3501
3502        return 0;
3503}
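
/*
 * A minimal sketch, for illustration only, of how a DPC thread like the one
 * above is created and torn down with the kthread API.  The qla2xxx probe
 * and remove paths do the equivalent elsewhere in this file; the helper
 * names below are hypothetical.  kthread_create() leaves the thread stopped,
 * wake_up_process() lets it run, and kthread_stop() makes
 * kthread_should_stop() return true and waits for the thread to exit:
 *
 *      static int example_start_dpc(struct qla_hw_data *ha)
 *      {
 *              struct task_struct *t;
 *
 *              t = kthread_create(qla2x00_do_dpc, ha, "example_dpc");
 *              if (IS_ERR(t))
 *                      return PTR_ERR(t);
 *              ha->dpc_thread = t;
 *              wake_up_process(t);
 *              return 0;
 *      }
 *
 *      static void example_stop_dpc(struct qla_hw_data *ha)
 *      {
 *              kthread_stop(ha->dpc_thread);
 *              ha->dpc_thread = NULL;
 *      }
 */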
3504
3505void
3506qla2xxx_wake_dpc(struct scsi_qla_host *vha)
3507{
3508        struct qla_hw_data *ha = vha->hw;
3509        struct task_struct *t = ha->dpc_thread;
3510
3511        if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
3512                wake_up_process(t);
3513}
3514
3515/*
3516*  qla2x00_rst_aen
3517*      Processes asynchronous reset.
3518*
3519* Input:
3520*      vha = adapter block pointer.
3521*/
3522static void
3523qla2x00_rst_aen(scsi_qla_host_t *vha)
3524{
3525        if (vha->flags.online && !vha->flags.reset_active &&
3526            !atomic_read(&vha->loop_down_timer) &&
3527            !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
3528                do {
3529                        clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3530
3531                        /*
3532                         * Issue marker command only when we are going to start
3533                         * the I/O.
3534                         */
3535                        vha->marker_needed = 1;
3536                } while (!atomic_read(&vha->loop_down_timer) &&
3537                    (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
3538        }
3539}
3540
3541static void
3542qla2x00_sp_free_dma(srb_t *sp)
3543{
3544        struct scsi_cmnd *cmd = sp->cmd;
3545        struct qla_hw_data *ha = sp->fcport->vha->hw;
3546
3547        if (sp->flags & SRB_DMA_VALID) {
3548                scsi_dma_unmap(cmd);
3549                sp->flags &= ~SRB_DMA_VALID;
3550        }
3551
3552        if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
3553                dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
3554                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
3555                sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
3556        }
3557
3558        if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
3559                /* List is guaranteed to have elements */
3560                qla2x00_clean_dsd_pool(ha, sp);
3561                sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
3562        }
3563
3564        if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
3565                dma_pool_free(ha->dl_dma_pool, sp->ctx,
3566                    ((struct crc_context *)sp->ctx)->crc_ctx_dma);
3567                sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
3568        }
3569
3570        CMD_SP(cmd) = NULL;
3571}
3572
3573static void
3574qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
3575{
3576        struct scsi_cmnd *cmd = sp->cmd;
3577
3578        qla2x00_sp_free_dma(sp);
3579
3580        if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
3581                struct ct6_dsd *ctx = sp->ctx;
3582                dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
3583                        ctx->fcp_cmnd_dma);
3584                list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
3585                ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
3586                ha->gbl_dsd_avail += ctx->dsd_use_cnt;
3587                mempool_free(sp->ctx, ha->ctx_mempool);
3588                sp->ctx = NULL;
3589        }
3590
3591        mempool_free(sp, ha->srb_mempool);
3592        cmd->scsi_done(cmd);
3593}
3594
3595void
3596qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3597{
3598        if (atomic_read(&sp->ref_count) == 0) {
3599                DEBUG2(qla_printk(KERN_WARNING, ha,
3600                    "SP reference-count to ZERO -- sp=%p\n", sp));
3601                DEBUG2(BUG());
3602                return;
3603        }
3604        if (!atomic_dec_and_test(&sp->ref_count))
3605                return;
3606        qla2x00_sp_final_compl(ha, sp);
3607}
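
/*
 * Reference-counting sketch for the completion path above: sp->ref_count is
 * set to 1 when the srb is built, and a path that can race with normal
 * completion (the abort handler, for instance) takes an extra reference
 * before touching the srb.  Every holder releases through
 * qla2x00_sp_compl(), so qla2x00_sp_final_compl() -- which unmaps DMA,
 * returns the srb to srb_mempool and calls scsi_done() -- runs exactly once,
 * on the last put.
 */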
3608
3609/**************************************************************************
3610*   qla2x00_timer
3611*
3612* Description:
3613*   One second timer
3614*
3615* Context: Interrupt
3616***************************************************************************/
3617void
3618qla2x00_timer(scsi_qla_host_t *vha)
3619{
3620        unsigned long   cpu_flags = 0;
3621        int             start_dpc = 0;
3622        int             index;
3623        srb_t           *sp;
3624        uint16_t        w;
3625        struct qla_hw_data *ha = vha->hw;
3626        struct req_que *req;
3627
3628        if (ha->flags.eeh_busy) {
3629                qla2x00_restart_timer(vha, WATCH_INTERVAL);
3630                return;
3631        }
3632
3633        /* Hardware read to raise pending EEH errors during mailbox waits. */
3634        if (!pci_channel_offline(ha->pdev))
3635                pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3636
3637        /* Make sure qla82xx_watchdog is run only for physical port */
3638        if (!vha->vp_idx && IS_QLA82XX(ha)) {
3639                if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
3640                        start_dpc++;
3641                qla82xx_watchdog(vha);
3642        }
3643
3644        /* Loop down handler. */
3645        if (atomic_read(&vha->loop_down_timer) > 0 &&
3646            !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
3647            !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
3648                && vha->flags.online) {
3649
3650                if (atomic_read(&vha->loop_down_timer) ==
3651                    vha->loop_down_abort_time) {
3652
3653                        DEBUG(printk("scsi(%ld): Loop Down - aborting the "
3654                            "queues before time expire\n",
3655                            vha->host_no));
3656
3657                        if (!IS_QLA2100(ha) && vha->link_down_timeout)
3658                                atomic_set(&vha->loop_state, LOOP_DEAD);
3659
3660                        /*
3661                         * Schedule an ISP abort to return any FCP2-device
3662                         * commands.
3663                         */
3664                        /* NPIV - scan physical port only */
3665                        if (!vha->vp_idx) {
3666                                spin_lock_irqsave(&ha->hardware_lock,
3667                                    cpu_flags);
3668                                req = ha->req_q_map[0];
3669                                for (index = 1;
3670                                    index < MAX_OUTSTANDING_COMMANDS;
3671                                    index++) {
3672                                        fc_port_t *sfcp;
3673
3674                                        sp = req->outstanding_cmds[index];
3675                                        if (!sp)
3676                                                continue;
3677                                        if (sp->ctx && !IS_PROT_IO(sp))
3678                                                continue;
3679                                        sfcp = sp->fcport;
3680                                        if (!(sfcp->flags & FCF_FCP2_DEVICE))
3681                                                continue;
3682
3683                                        if (IS_QLA82XX(ha))
3684                                                set_bit(FCOE_CTX_RESET_NEEDED,
3685                                                        &vha->dpc_flags);
3686                                        else
3687                                                set_bit(ISP_ABORT_NEEDED,
3688                                                        &vha->dpc_flags);
3689                                        break;
3690                                }
3691                                spin_unlock_irqrestore(&ha->hardware_lock,
3692                                                                cpu_flags);
3693                        }
3694                        start_dpc++;
3695                }
3696
3697                /* if the loop has been down for 4 minutes, reinit adapter */
3698                if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
3699                        if (!(vha->device_flags & DFLG_NO_CABLE)) {
3700                                DEBUG(printk("scsi(%ld): Loop down - "
3701                                    "aborting ISP.\n",
3702                                    vha->host_no));
3703                                qla_printk(KERN_WARNING, ha,
3704                                    "Loop down - aborting ISP.\n");
3705
3706                                if (IS_QLA82XX(ha))
3707                                        set_bit(FCOE_CTX_RESET_NEEDED,
3708                                                &vha->dpc_flags);
3709                                else
3710                                        set_bit(ISP_ABORT_NEEDED,
3711                                                &vha->dpc_flags);
3712                        }
3713                }
3714                DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
3715                    vha->host_no,
3716                    atomic_read(&vha->loop_down_timer)));
3717        }
3718
3719        /* Check if beacon LED needs to be blinked for physical host only */
3720        if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
3721                set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
3722                start_dpc++;
3723        }
3724
3725        /* Process any deferred work. */
3726        if (!list_empty(&vha->work_list))
3727                start_dpc++;
3728
3729        /* Schedule the DPC routine if needed */
3730        if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
3731            test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
3732            test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
3733            start_dpc ||
3734            test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
3735            test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
3736            test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
3737            test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3738            test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3739            test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
3740                qla2xxx_wake_dpc(vha);
3741
3742        qla2x00_restart_timer(vha, WATCH_INTERVAL);
3743}
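
/*
 * qla2x00_restart_timer() used above is defined elsewhere in this file.  A
 * minimal sketch, under the assumption that the host structure carries a
 * struct timer_list (field name assumed), of the classic self-rearming
 * pattern it follows with the 2.6-era timer API:
 *
 *      static void example_start_timer(scsi_qla_host_t *vha,
 *          void (*func)(unsigned long), unsigned long interval)
 *      {
 *              init_timer(&vha->timer);
 *              vha->timer.expires = jiffies + interval * HZ;
 *              vha->timer.data = (unsigned long)vha;
 *              vha->timer.function = func;
 *              add_timer(&vha->timer);
 *      }
 *
 *      static void example_restart_timer(scsi_qla_host_t *vha,
 *          unsigned long interval)
 *      {
 *              mod_timer(&vha->timer, jiffies + interval * HZ);
 *      }
 *
 * The callback runs in timer (softirq) context -- hence the "Context:
 * Interrupt" note above -- so it only reads state, sets dpc_flags bits and
 * wakes the DPC thread, leaving the heavy lifting to process context.
 */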
3744
3745/* Firmware interface routines. */
3746
3747#define FW_BLOBS        8
3748#define FW_ISP21XX      0
3749#define FW_ISP22XX      1
3750#define FW_ISP2300      2
3751#define FW_ISP2322      3
3752#define FW_ISP24XX      4
3753#define FW_ISP25XX      5
3754#define FW_ISP81XX      6
3755#define FW_ISP82XX      7
3756
3757#define FW_FILE_ISP21XX "ql2100_fw.bin"
3758#define FW_FILE_ISP22XX "ql2200_fw.bin"
3759#define FW_FILE_ISP2300 "ql2300_fw.bin"
3760#define FW_FILE_ISP2322 "ql2322_fw.bin"
3761#define FW_FILE_ISP24XX "ql2400_fw.bin"
3762#define FW_FILE_ISP25XX "ql2500_fw.bin"
3763#define FW_FILE_ISP81XX "ql8100_fw.bin"
3764#define FW_FILE_ISP82XX "ql8200_fw.bin"
3765
3766static DEFINE_MUTEX(qla_fw_lock);
3767
3768static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
3769        { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
3770        { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
3771        { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
3772        { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
3773        { .name = FW_FILE_ISP24XX, },
3774        { .name = FW_FILE_ISP25XX, },
3775        { .name = FW_FILE_ISP81XX, },
3776        { .name = FW_FILE_ISP82XX, },
3777};
3778
3779struct fw_blob *
3780qla2x00_request_firmware(scsi_qla_host_t *vha)
3781{
3782        struct qla_hw_data *ha = vha->hw;
3783        struct fw_blob *blob;
3784
3785        blob = NULL;
3786        if (IS_QLA2100(ha)) {
3787                blob = &qla_fw_blobs[FW_ISP21XX];
3788        } else if (IS_QLA2200(ha)) {
3789                blob = &qla_fw_blobs[FW_ISP22XX];
3790        } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3791                blob = &qla_fw_blobs[FW_ISP2300];
3792        } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
3793                blob = &qla_fw_blobs[FW_ISP2322];
3794        } else if (IS_QLA24XX_TYPE(ha)) {
3795                blob = &qla_fw_blobs[FW_ISP24XX];
3796        } else if (IS_QLA25XX(ha)) {
3797                blob = &qla_fw_blobs[FW_ISP25XX];
3798        } else if (IS_QLA81XX(ha)) {
3799                blob = &qla_fw_blobs[FW_ISP81XX];
3800        } else if (IS_QLA82XX(ha)) {
3801                blob = &qla_fw_blobs[FW_ISP82XX];
3802        }
3803
3804        mutex_lock(&qla_fw_lock);
3805        if (!blob || blob->fw)
3806                goto out;
3807
3808        if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3809                DEBUG2(printk("scsi(%ld): Failed to load firmware image "
3810                    "(%s).\n", vha->host_no, blob->name));
3811                blob->fw = NULL;
3812                blob = NULL;
3813                goto out;
3814        }
3815
3816out:
3817        mutex_unlock(&qla_fw_lock);
3818        return blob;
3819}
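
/*
 * Caller-side sketch, illustrative only, of consuming the blob returned
 * above (the real consumers are the firmware-load routines in qla_init.c):
 *
 *      blob = qla2x00_request_firmware(vha);
 *      if (!blob)
 *              return QLA_FUNCTION_FAILED;
 *      code = (uint16_t *)blob->fw->data;
 *      len = blob->fw->size / sizeof(uint16_t);
 *      ... parse and download the image to the ISP ...
 *
 * struct firmware exposes the image through fw->data and fw->size; the blobs
 * are cached in qla_fw_blobs[] and only released at module exit by
 * qla2x00_release_firmware() below.
 */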
3820
3821static void
3822qla2x00_release_firmware(void)
3823{
3824        int idx;
3825
3826        mutex_lock(&qla_fw_lock);
3827        for (idx = 0; idx < FW_BLOBS; idx++)
3828                if (qla_fw_blobs[idx].fw)
3829                        release_firmware(qla_fw_blobs[idx].fw);
3830        mutex_unlock(&qla_fw_lock);
3831}
3832
3833static pci_ers_result_t
3834qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3835{
3836        scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3837        struct qla_hw_data *ha = vha->hw;
3838
3839        DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
3840            state));
3841
3842        switch (state) {
3843        case pci_channel_io_normal:
3844                ha->flags.eeh_busy = 0;
3845                return PCI_ERS_RESULT_CAN_RECOVER;
3846        case pci_channel_io_frozen:
3847                ha->flags.eeh_busy = 1;
3848                /* For ISP82XX complete any pending mailbox cmd */
3849                if (IS_QLA82XX(ha)) {
3850                        ha->flags.isp82xx_fw_hung = 1;
3851                        if (ha->flags.mbox_busy) {
3852                                ha->flags.mbox_int = 1;
3853                                DEBUG2(qla_printk(KERN_ERR, ha,
3854                                        "Due to pci channel io frozen, doing premature "
3855                                        "completion of mbx command\n"));
3856                                complete(&ha->mbx_intr_comp);
3857                        }
3858                }
3859                qla2x00_free_irqs(vha);
3860                pci_disable_device(pdev);
3861                /* Return all outstanding I/Os */
3862                qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3863                return PCI_ERS_RESULT_NEED_RESET;
3864        case pci_channel_io_perm_failure:
3865                ha->flags.pci_channel_io_perm_failure = 1;
3866                qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3867                return PCI_ERS_RESULT_DISCONNECT;
3868        }
3869        return PCI_ERS_RESULT_NEED_RESET;
3870}
3871
3872static pci_ers_result_t
3873qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3874{
3875        int risc_paused = 0;
3876        uint32_t stat;
3877        unsigned long flags;
3878        scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3879        struct qla_hw_data *ha = base_vha->hw;
3880        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3881        struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
3882
3883        if (IS_QLA82XX(ha))
3884                return PCI_ERS_RESULT_RECOVERED;
3885
3886        spin_lock_irqsave(&ha->hardware_lock, flags);
3887        if (IS_QLA2100(ha) || IS_QLA2200(ha)){
3888                stat = RD_REG_DWORD(&reg->hccr);
3889                if (stat & HCCR_RISC_PAUSE)
3890                        risc_paused = 1;
3891        } else if (IS_QLA23XX(ha)) {
3892                stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
3893                if (stat & HSR_RISC_PAUSED)
3894                        risc_paused = 1;
3895        } else if (IS_FWI2_CAPABLE(ha)) {
3896                stat = RD_REG_DWORD(&reg24->host_status);
3897                if (stat & HSRX_RISC_PAUSED)
3898                        risc_paused = 1;
3899        }
3900        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3901
3902        if (risc_paused) {
3903                qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
3904                    "Dumping firmware!\n");
3905                ha->isp_ops->fw_dump(base_vha, 0);
3906
3907                return PCI_ERS_RESULT_NEED_RESET;
3908        } else
3909                return PCI_ERS_RESULT_RECOVERED;
3910}
3911
3912uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3913{
3914        uint32_t rval = QLA_FUNCTION_FAILED;
3915        uint32_t drv_active = 0;
3916        struct qla_hw_data *ha = base_vha->hw;
3917        int fn;
3918        struct pci_dev *other_pdev = NULL;
3919
3920        DEBUG17(qla_printk(KERN_INFO, ha,
3921            "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no));
3922
3923        set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3924
3925        if (base_vha->flags.online) {
3926                /* Abort all outstanding commands,
3927                 * so they can be requeued later */
3928                qla2x00_abort_isp_cleanup(base_vha);
3929        }
3930
3931
3932        fn = PCI_FUNC(ha->pdev->devfn);
3933        while (fn > 0) {
3934                fn--;
3935                DEBUG17(qla_printk(KERN_INFO, ha,
3936                    "Finding pci device at function = 0x%x\n", fn));
3937                other_pdev =
3938                    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3939                    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3940                    fn));
3941
3942                if (!other_pdev)
3943                        continue;
3944                if (atomic_read(&other_pdev->enable_cnt)) {
3945                        DEBUG17(qla_printk(KERN_INFO, ha,
3946                            "Found PCI func available and enabled at 0x%x\n",
3947                            fn));
3948                        pci_dev_put(other_pdev);
3949                        break;
3950                }
3951                pci_dev_put(other_pdev);
3952        }
3953
3954        if (!fn) {
3955                /* Reset owner */
3956                DEBUG17(qla_printk(KERN_INFO, ha,
3957                    "This devfn is reset owner = 0x%x\n", ha->pdev->devfn));
3958                qla82xx_idc_lock(ha);
3959
3960                qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3961                    QLA82XX_DEV_INITIALIZING);
3962
3963                qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
3964                    QLA82XX_IDC_VERSION);
3965
3966                drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3967                DEBUG17(qla_printk(KERN_INFO, ha,
3968                    "drv_active = 0x%x\n", drv_active));
3969
3970                qla82xx_idc_unlock(ha);
3971                /* Reset if device is not already reset
3972                 * drv_active would be 0 if a reset has already been done
3973                 */
3974                if (drv_active)
3975                        rval = qla82xx_start_firmware(base_vha);
3976                else
3977                        rval = QLA_SUCCESS;
3978                qla82xx_idc_lock(ha);
3979
3980                if (rval != QLA_SUCCESS) {
3981                        qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
3982                        qla82xx_clear_drv_active(ha);
3983                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3984                            QLA82XX_DEV_FAILED);
3985                } else {
3986                        qla_printk(KERN_INFO, ha, "HW State: READY\n");
3987                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3988                            QLA82XX_DEV_READY);
3989                        qla82xx_idc_unlock(ha);
3990                        ha->flags.isp82xx_fw_hung = 0;
3991                        rval = qla82xx_restart_isp(base_vha);
3992                        qla82xx_idc_lock(ha);
3993                        /* Clear driver state register */
3994                        qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
3995                        qla82xx_set_drv_active(base_vha);
3996                }
3997                qla82xx_idc_unlock(ha);
3998        } else {
3999                DEBUG17(qla_printk(KERN_INFO, ha,
4000                    "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
4001                if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4002                    QLA82XX_DEV_READY)) {
4003                        ha->flags.isp82xx_fw_hung = 0;
4004                        rval = qla82xx_restart_isp(base_vha);
4005                        qla82xx_idc_lock(ha);
4006                        qla82xx_set_drv_active(base_vha);
4007                        qla82xx_idc_unlock(ha);
4008                }
4009        }
4010        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
4011
4012        return rval;
4013}
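
/*
 * Recovery ownership note for the function above: the lowest-numbered PCI
 * function of the adapter that is still enabled acts as the reset owner.  It
 * takes the IDC lock, moves the device state through INITIALIZING to READY
 * (or FAILED) and restarts the firmware; every other function checks that
 * the device state has reached QLA82XX_DEV_READY and then restarts only its
 * own ISP, so the chip is reinitialized just once per slot reset.
 */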
4014
4015static pci_ers_result_t
4016qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4017{
4018        pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
4019        scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
4020        struct qla_hw_data *ha = base_vha->hw;
4021        struct rsp_que *rsp;
4022        int rc, retries = 10;
4023
4024        DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
4025
4026        /* Workaround: the qla2xxx driver, which accesses hardware early in
4027         * the recovery path, needs the error state to be
4028         * pci_channel_io_normal.  Otherwise mailbox commands time out.
4029         */
4030        pdev->error_state = pci_channel_io_normal;
4031
4032        pci_restore_state(pdev);
4033
4034        /* pci_restore_state() clears the saved_state flag of the device,
4035         * so save the state again to set the flag back.
4036         */
4037        pci_save_state(pdev);
4038
4039        if (ha->mem_only)
4040                rc = pci_enable_device_mem(pdev);
4041        else
4042                rc = pci_enable_device(pdev);
4043
4044        if (rc) {
4045                qla_printk(KERN_WARNING, ha,
4046                    "Can't re-enable PCI device after reset.\n");
4047                goto exit_slot_reset;
4048        }
4049
4050        rsp = ha->rsp_q_map[0];
4051        if (qla2x00_request_irqs(ha, rsp))
4052                goto exit_slot_reset;
4053
4054        if (ha->isp_ops->pci_config(base_vha))
4055                goto exit_slot_reset;
4056
4057        if (IS_QLA82XX(ha)) {
4058                if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
4059                        ret = PCI_ERS_RESULT_RECOVERED;
4060                        goto exit_slot_reset;
4061                } else
4062                        goto exit_slot_reset;
4063        }
4064
4065        while (ha->flags.mbox_busy && retries--)
4066                msleep(1000);
4067
4068        set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
4069        if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
4070                ret =  PCI_ERS_RESULT_RECOVERED;
4071        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
4072
4073
4074exit_slot_reset:
4075        DEBUG17(qla_printk(KERN_WARNING, ha,
4076            "slot_reset-return:ret=%x\n", ret));
4077
4078        return ret;
4079}
4080
4081static void
4082qla2xxx_pci_resume(struct pci_dev *pdev)
4083{
4084        scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
4085        struct qla_hw_data *ha = base_vha->hw;
4086        int ret;
4087
4088        DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
4089
4090        ret = qla2x00_wait_for_hba_online(base_vha);
4091        if (ret != QLA_SUCCESS) {
4092                qla_printk(KERN_ERR, ha,
4093                    "the device failed to resume I/O "
4094                    "from slot/link_reset");
4095        }
4096
4097        pci_cleanup_aer_uncorrect_error_status(pdev);
4098
4099        ha->flags.eeh_busy = 0;
4100}
4101
4102static struct pci_error_handlers qla2xxx_err_handler = {
4103        .error_detected = qla2xxx_pci_error_detected,
4104        .mmio_enabled = qla2xxx_pci_mmio_enabled,
4105        .slot_reset = qla2xxx_pci_slot_reset,
4106        .resume = qla2xxx_pci_resume,
4107};
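
/*
 * The PCI error-recovery core (AER on most platforms, EEH on pseries) drives
 * these callbacks in sequence: error_detected() first, mmio_enabled() only
 * when the driver asked to recover without a reset, slot_reset() after the
 * link or slot has been reset, and resume() once normal traffic may restart.
 * The eeh_busy flag set in error_detected() is what makes qla2x00_timer()
 * and the DPC thread above back off while recovery is in progress.
 */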
4108
4109static struct pci_device_id qla2xxx_pci_tbl[] = {
4110        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
4111        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
4112        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
4113        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
4114        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
4115        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
4116        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
4117        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
4118        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
4119        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
4120        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
4121        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
4122        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
4123        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
4124        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
4125        { 0 },
4126};
4127MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
4128
4129static struct pci_driver qla2xxx_pci_driver = {
4130        .name           = QLA2XXX_DRIVER_NAME,
4131        .driver         = {
4132                .owner          = THIS_MODULE,
4133        },
4134        .id_table       = qla2xxx_pci_tbl,
4135        .probe          = qla2x00_probe_one,
4136        .remove         = qla2x00_remove_one,
4137        .shutdown       = qla2x00_shutdown,
4138        .err_handler    = &qla2xxx_err_handler,
4139};
4140
4141static struct file_operations apidev_fops = {
4142        .owner = THIS_MODULE,
4143        .llseek = noop_llseek,
4144};
4145
4146/**
4147 * qla2x00_module_init - Module initialization.
4148 **/
4149static int __init
4150qla2x00_module_init(void)
4151{
4152        int ret = 0;
4153
4154        /* Allocate cache for SRBs. */
4155        srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
4156            SLAB_HWCACHE_ALIGN, NULL);
4157        if (srb_cachep == NULL) {
4158                printk(KERN_ERR
4159                    "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
4160                return -ENOMEM;
4161        }
4162
4163        /* Derive version string. */
4164        strcpy(qla2x00_version_str, QLA2XXX_VERSION);
4165        if (ql2xextended_error_logging)
4166                strcat(qla2x00_version_str, "-debug");
4167
4168        qla2xxx_transport_template =
4169            fc_attach_transport(&qla2xxx_transport_functions);
4170        if (!qla2xxx_transport_template) {
4171                kmem_cache_destroy(srb_cachep);
4172                return -ENODEV;
4173        }
4174
4175        apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
4176        if (apidev_major < 0) {
4177                printk(KERN_WARNING "qla2xxx: Unable to register char device "
4178                    "%s\n", QLA2XXX_APIDEV);
4179        }
4180
4181        qla2xxx_transport_vport_template =
4182            fc_attach_transport(&qla2xxx_transport_vport_functions);
4183        if (!qla2xxx_transport_vport_template) {
4184                kmem_cache_destroy(srb_cachep);
4185                fc_release_transport(qla2xxx_transport_template);
4186                return -ENODEV;
4187        }
4188
4189        printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
4190            qla2x00_version_str);
4191        ret = pci_register_driver(&qla2xxx_pci_driver);
4192        if (ret) {
4193                kmem_cache_destroy(srb_cachep);
4194                fc_release_transport(qla2xxx_transport_template);
4195                fc_release_transport(qla2xxx_transport_vport_template);
4196        }
4197        return ret;
4198}
4199
4200/**
4201 * qla2x00_module_exit - Module cleanup.
4202 **/
4203static void __exit
4204qla2x00_module_exit(void)
4205{
4206        unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
4207        pci_unregister_driver(&qla2xxx_pci_driver);
4208        qla2x00_release_firmware();
4209        kmem_cache_destroy(srb_cachep);
4210        if (ctx_cachep)
4211                kmem_cache_destroy(ctx_cachep);
4212        fc_release_transport(qla2xxx_transport_template);
4213        fc_release_transport(qla2xxx_transport_vport_template);
4214}
4215
4216module_init(qla2x00_module_init);
4217module_exit(qla2x00_module_exit);
4218
4219MODULE_AUTHOR("QLogic Corporation");
4220MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
4221MODULE_LICENSE("GPL");
4222MODULE_VERSION(QLA2XXX_VERSION);
4223MODULE_FIRMWARE(FW_FILE_ISP21XX);
4224MODULE_FIRMWARE(FW_FILE_ISP22XX);
4225MODULE_FIRMWARE(FW_FILE_ISP2300);
4226MODULE_FIRMWARE(FW_FILE_ISP2322);
4227MODULE_FIRMWARE(FW_FILE_ISP24XX);
4228MODULE_FIRMWARE(FW_FILE_ISP25XX);
4229