linux/drivers/scsi/qla2xxx/qla_os.c
   1/*
   2 * QLogic Fibre Channel HBA Driver
   3 * Copyright (c)  2003-2014 QLogic Corporation
   4 *
   5 * See LICENSE.qla2xxx for copyright and licensing details.
   6 */
   7#include "qla_def.h"
   8
   9#include <linux/moduleparam.h>
  10#include <linux/vmalloc.h>
  11#include <linux/delay.h>
  12#include <linux/kthread.h>
  13#include <linux/mutex.h>
  14#include <linux/kobject.h>
  15#include <linux/slab.h>
  16#include <linux/blk-mq-pci.h>
  17#include <scsi/scsi_tcq.h>
  18#include <scsi/scsicam.h>
  19#include <scsi/scsi_transport.h>
  20#include <scsi/scsi_transport_fc.h>
  21
  22#include "qla_target.h"
  23
  24/*
  25 * Driver version
  26 */
  27char qla2x00_version_str[40];
  28
  29static int apidev_major;
  30
  31/*
  32 * SRB allocation cache
  33 */
  34struct kmem_cache *srb_cachep;
  35
  36/*
  37 * CT6 CTX allocation cache
  38 */
  39static struct kmem_cache *ctx_cachep;
  40/*
  41 * error level for logging
  42 */
  43int ql_errlev = ql_log_all;
  44
  45static int ql2xenableclass2;
  46module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
  47MODULE_PARM_DESC(ql2xenableclass2,
  48                "Specify if Class 2 operations are supported from the very "
  49                "beginning. Default is 0 - class 2 not supported.");
  50
  51
  52int ql2xlogintimeout = 20;
  53module_param(ql2xlogintimeout, int, S_IRUGO);
  54MODULE_PARM_DESC(ql2xlogintimeout,
  55                "Login timeout value in seconds.");
  56
  57int qlport_down_retry;
  58module_param(qlport_down_retry, int, S_IRUGO);
  59MODULE_PARM_DESC(qlport_down_retry,
  60                "Maximum number of command retries to a port that returns "
  61                "a PORT-DOWN status.");
  62
  63int ql2xplogiabsentdevice;
  64module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
  65MODULE_PARM_DESC(ql2xplogiabsentdevice,
  66                "Option to enable PLOGI to devices that are not present after "
  67                "a Fabric scan.  This is needed for several broken switches. "
  68                "Default is 0 - no PLOGI. 1 - perform PLOGI.");
  69
  70int ql2xloginretrycount = 0;
  71module_param(ql2xloginretrycount, int, S_IRUGO);
  72MODULE_PARM_DESC(ql2xloginretrycount,
  73                "Specify an alternate value for the NVRAM login retry count.");
  74
  75int ql2xallocfwdump = 1;
  76module_param(ql2xallocfwdump, int, S_IRUGO);
  77MODULE_PARM_DESC(ql2xallocfwdump,
  78                "Option to enable allocation of memory for a firmware dump "
  79                "during HBA initialization.  Memory allocation requirements "
  80                "vary by ISP type.  Default is 1 - allocate memory.");
  81
  82int ql2xextended_error_logging;
  83module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
  84module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
  85MODULE_PARM_DESC(ql2xextended_error_logging,
  86                "Option to enable extended error logging,\n"
  87                "\t\tDefault is 0 - no logging.  0x40000000 - Module Init & Probe.\n"
  88                "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
  89                "\t\t0x08000000 - IO tracing.    0x04000000 - DPC Thread.\n"
  90                "\t\t0x02000000 - Async events.  0x01000000 - Timer routines.\n"
  91                "\t\t0x00800000 - User space.    0x00400000 - Task Management.\n"
  92                "\t\t0x00200000 - AER/EEH.       0x00100000 - Multi Q.\n"
  93                "\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
  94                "\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
  95                "\t\t0x00008000 - Verbose.       0x00004000 - Target.\n"
  96                "\t\t0x00002000 - Target Mgmt.   0x00001000 - Target TMF.\n"
  97                "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
  98                "\t\t0x1e400000 - Preferred value for capturing essential "
  99                "debug information (equivalent to old "
 100                "ql2xextended_error_logging=1).\n"
 101                "\t\tDo LOGICAL OR of the value to enable more than one level");
 102
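    /*
     * Usage sketch (added commentary, not part of the original source):
     * the debug mask above is a plain module parameter declared S_IWUSR,
     * so it can be set at load time or changed at runtime through sysfs.
     * Assuming the module is loaded as qla2xxx, the "essential debug"
     * value mentioned in the description would be enabled with either:
     *
     *   modprobe qla2xxx ql2xextended_error_logging=0x1e400000
     *   echo 0x1e400000 > /sys/module/qla2xxx/parameters/ql2xextended_error_logging
     *
     * Individual levels are ORed together, e.g. 0x40000000 | 0x20000000
     * to trace module init/probe plus mailbox commands.
     */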
 103int ql2xshiftctondsd = 6;
 104module_param(ql2xshiftctondsd, int, S_IRUGO);
 105MODULE_PARM_DESC(ql2xshiftctondsd,
 106                "Set to control shifting of command type processing "
 107                "based on total number of SG elements.");
 108
 109int ql2xfdmienable = 1;
 110module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
 111module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
 112MODULE_PARM_DESC(ql2xfdmienable,
 113                "Enables FDMI registrations. "
 114                "0 - no FDMI. Default is 1 - perform FDMI.");
 115
 116#define MAX_Q_DEPTH     32
 117static int ql2xmaxqdepth = MAX_Q_DEPTH;
 118module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 119MODULE_PARM_DESC(ql2xmaxqdepth,
 120                "Maximum queue depth to set for each LUN. "
 121                "Default is 32.");
 122
 123int ql2xenabledif = 2;
 124module_param(ql2xenabledif, int, S_IRUGO);
 125MODULE_PARM_DESC(ql2xenabledif,
 126                " Enable T10-CRC-DIF:\n"
 127                " Default is 2.\n"
 128                "  0 -- No DIF Support\n"
 129                "  1 -- Enable DIF for all types\n"
 130                "  2 -- Enable DIF for all types, except Type 0.\n");
 131
 132int ql2xenablehba_err_chk = 2;
 133module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
 134MODULE_PARM_DESC(ql2xenablehba_err_chk,
 135                " Enable T10-CRC-DIF Error isolation by HBA:\n"
 136                " Default is 2.\n"
 137                "  0 -- Error isolation disabled\n"
 138                "  1 -- Error isolation enabled only for DIX Type 0\n"
 139                "  2 -- Error isolation enabled for all Types\n");
 140
 141int ql2xiidmaenable = 1;
 142module_param(ql2xiidmaenable, int, S_IRUGO);
 143MODULE_PARM_DESC(ql2xiidmaenable,
 144                "Enables iIDMA settings. "
 145                "Default is 1 - perform iIDMA. 0 - no iIDMA.");
 146
 147int ql2xmqsupport = 1;
 148module_param(ql2xmqsupport, int, S_IRUGO);
 149MODULE_PARM_DESC(ql2xmqsupport,
 150                "Enable on-demand multiple queue pairs support. "
 151                "Default is 1 - enabled. "
 152                "Set it to 0 to turn off mq qpair support.");
 153
 154int ql2xfwloadbin;
 155module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
 156module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
 157MODULE_PARM_DESC(ql2xfwloadbin,
 158                "Option to specify location from which to load ISP firmware:\n"
 159                " 2 -- load firmware via the request_firmware() (hotplug)\n"
 160                "      interface.\n"
 161                " 1 -- load firmware from flash.\n"
 162                " 0 -- use default semantics.\n");
 163
 164int ql2xetsenable;
 165module_param(ql2xetsenable, int, S_IRUGO);
 166MODULE_PARM_DESC(ql2xetsenable,
 167                "Enables firmware ETS burst. "
 168                "Default is 0 - skip ETS enablement.");
 169
 170int ql2xdbwr = 1;
 171module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
 172MODULE_PARM_DESC(ql2xdbwr,
 173                "Option to specify scheme for request queue posting.\n"
 174                " 0 -- Regular doorbell.\n"
 175                " 1 -- CAMRAM doorbell (faster).\n");
 176
 177int ql2xtargetreset = 1;
 178module_param(ql2xtargetreset, int, S_IRUGO);
 179MODULE_PARM_DESC(ql2xtargetreset,
 180                 "Enable target reset. "
 181                 "Default is 1 - use hw defaults.");
 182
 183int ql2xgffidenable;
 184module_param(ql2xgffidenable, int, S_IRUGO);
 185MODULE_PARM_DESC(ql2xgffidenable,
 186                "Enables GFF_ID checks of port type. "
 187                "Default is 0 - Do not use GFF_ID information.");
 188
 189int ql2xasynctmfenable;
 190module_param(ql2xasynctmfenable, int, S_IRUGO);
 191MODULE_PARM_DESC(ql2xasynctmfenable,
 192                "Enables issuing TM IOCBs asynchronously via the IOCB mechanism. "
 193                "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
 194
 195int ql2xdontresethba;
 196module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
 197MODULE_PARM_DESC(ql2xdontresethba,
 198                "Option to specify reset behaviour.\n"
 199                " 0 (Default) -- Reset on failure.\n"
 200                " 1 -- Do not reset on failure.\n");
 201
 202uint64_t ql2xmaxlun = MAX_LUNS;
 203module_param(ql2xmaxlun, ullong, S_IRUGO);
 204MODULE_PARM_DESC(ql2xmaxlun,
 205                "Defines the maximum LU number to register with the SCSI "
 206                "midlayer. Default is 65535.");
 207
 208int ql2xmdcapmask = 0x1F;
 209module_param(ql2xmdcapmask, int, S_IRUGO);
 210MODULE_PARM_DESC(ql2xmdcapmask,
 211                "Set the Minidump driver capture mask level. "
 212                "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
 213
 214int ql2xmdenable = 1;
 215module_param(ql2xmdenable, int, S_IRUGO);
 216MODULE_PARM_DESC(ql2xmdenable,
 217                "Enable/disable MiniDump. "
 218                "0 - MiniDump disabled. "
 219                "1 (Default) - MiniDump enabled.");
 220
 221int ql2xexlogins = 0;
 222module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
 223MODULE_PARM_DESC(ql2xexlogins,
 224                 "Number of extended logins. "
 225                 "0 (Default) - Disabled.");
 226
 227int ql2xexchoffld = 0;
 228module_param(ql2xexchoffld, uint, S_IRUGO|S_IWUSR);
 229MODULE_PARM_DESC(ql2xexchoffld,
 230                 "Number of exchanges to offload. "
 231                 "0 (Default) - Disabled.");
 232
 233int ql2xfwholdabts = 0;
 234module_param(ql2xfwholdabts, int, S_IRUGO);
 235MODULE_PARM_DESC(ql2xfwholdabts,
 236                "Allow FW to hold status IOCB until ABTS rsp received. "
 237                "0 (Default) Do not set fw option. "
 238                "1 - Set fw option to hold ABTS.");
 239
 240/*
 241 * SCSI host template entry points
 242 */
 243static int qla2xxx_slave_configure(struct scsi_device *device);
 244static int qla2xxx_slave_alloc(struct scsi_device *);
 245static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
 246static void qla2xxx_scan_start(struct Scsi_Host *);
 247static void qla2xxx_slave_destroy(struct scsi_device *);
 248static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 249static int qla2xxx_eh_abort(struct scsi_cmnd *);
 250static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
 251static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
 252static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
 253static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 254
 255static void qla2x00_clear_drv_active(struct qla_hw_data *);
 256static void qla2x00_free_device(scsi_qla_host_t *);
 257static void qla83xx_disable_laser(scsi_qla_host_t *vha);
 258static int qla2xxx_map_queues(struct Scsi_Host *shost);
 259
 260struct scsi_host_template qla2xxx_driver_template = {
 261        .module                 = THIS_MODULE,
 262        .name                   = QLA2XXX_DRIVER_NAME,
 263        .queuecommand           = qla2xxx_queuecommand,
 264
 265        .eh_abort_handler       = qla2xxx_eh_abort,
 266        .eh_device_reset_handler = qla2xxx_eh_device_reset,
 267        .eh_target_reset_handler = qla2xxx_eh_target_reset,
 268        .eh_bus_reset_handler   = qla2xxx_eh_bus_reset,
 269        .eh_host_reset_handler  = qla2xxx_eh_host_reset,
 270
 271        .slave_configure        = qla2xxx_slave_configure,
 272
 273        .slave_alloc            = qla2xxx_slave_alloc,
 274        .slave_destroy          = qla2xxx_slave_destroy,
 275        .scan_finished          = qla2xxx_scan_finished,
 276        .scan_start             = qla2xxx_scan_start,
 277        .change_queue_depth     = scsi_change_queue_depth,
 278        .map_queues             = qla2xxx_map_queues,
 279        .this_id                = -1,
 280        .cmd_per_lun            = 3,
 281        .use_clustering         = ENABLE_CLUSTERING,
 282        .sg_tablesize           = SG_ALL,
 283
 284        .max_sectors            = 0xFFFF,
 285        .shost_attrs            = qla2x00_host_attrs,
 286
 287        .supported_mode         = MODE_INITIATOR,
 288        .track_queue_depth      = 1,
 289};
 290
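    /*
     * Sketch (added commentary; based on the standard SCSI midlayer API
     * rather than on code shown here): the template above is handed to the
     * midlayer when the driver creates a host, roughly:
     *
     *   struct Scsi_Host *host =
     *       scsi_host_alloc(&qla2xxx_driver_template, sizeof(scsi_qla_host_t));
     *
     * after which the midlayer calls back into .queuecommand, the .eh_*
     * handlers and the slave_* hooks for devices attached to that host.
     */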
 291static struct scsi_transport_template *qla2xxx_transport_template = NULL;
 292struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
 293
 294/* TODO Convert to inlines
 295 *
 296 * Timer routines
 297 */
 298
 299__inline__ void
 300qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
 301{
 302        init_timer(&vha->timer);
 303        vha->timer.expires = jiffies + interval * HZ;
 304        vha->timer.data = (unsigned long)vha;
 305        vha->timer.function = (void (*)(unsigned long))func;
 306        add_timer(&vha->timer);
 307        vha->timer_active = 1;
 308}
 309
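    /*
     * Added commentary: qla2x00_start_timer() uses the classic
     * init_timer()/timer.data interface, so the callback receives the vha
     * pointer cast to unsigned long.  A minimal usage sketch (assuming the
     * qla2x00_timer() callback and WATCH_INTERVAL constant defined
     * elsewhere in the driver) would be:
     *
     *   qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
     *
     * The interval is multiplied by HZ, so it is expressed in seconds.
     */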
 310static inline void
 311qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
 312{
 313        /* Currently used for 82XX only. */
 314        if (vha->device_flags & DFLG_DEV_FAILED) {
 315                ql_dbg(ql_dbg_timer, vha, 0x600d,
 316                    "Device in a failed state, returning.\n");
 317                return;
 318        }
 319
 320        mod_timer(&vha->timer, jiffies + interval * HZ);
 321}
 322
 323static __inline__ void
 324qla2x00_stop_timer(scsi_qla_host_t *vha)
 325{
 326        del_timer_sync(&vha->timer);
 327        vha->timer_active = 0;
 328}
 329
 330static int qla2x00_do_dpc(void *data);
 331
 332static void qla2x00_rst_aen(scsi_qla_host_t *);
 333
 334static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
 335        struct req_que **, struct rsp_que **);
 336static void qla2x00_free_fw_dump(struct qla_hw_data *);
 337static void qla2x00_mem_free(struct qla_hw_data *);
 338int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
 339        struct qla_qpair *qpair);
 340
 341/* -------------------------------------------------------------------------- */
 342static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
 343                                struct rsp_que *rsp)
 344{
 345        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 346        ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
 347                                GFP_KERNEL);
 348        if (!ha->req_q_map) {
 349                ql_log(ql_log_fatal, vha, 0x003b,
 350                    "Unable to allocate memory for request queue ptrs.\n");
 351                goto fail_req_map;
 352        }
 353
 354        ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
 355                                GFP_KERNEL);
 356        if (!ha->rsp_q_map) {
 357                ql_log(ql_log_fatal, vha, 0x003c,
 358                    "Unable to allocate memory for response queue ptrs.\n");
 359                goto fail_rsp_map;
 360        }
 361
 362        if (ql2xmqsupport && ha->max_qpairs) {
 363                ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
 364                        GFP_KERNEL);
 365                if (!ha->queue_pair_map) {
 366                        ql_log(ql_log_fatal, vha, 0x0180,
 367                            "Unable to allocate memory for queue pair ptrs.\n");
 368                        goto fail_qpair_map;
 369                }
 370                ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
 371                if (ha->base_qpair == NULL) {
 372                        ql_log(ql_log_warn, vha, 0x0182,
 373                            "Failed to allocate base queue pair memory.\n");
 374                        goto fail_base_qpair;
 375                }
 376                ha->base_qpair->req = req;
 377                ha->base_qpair->rsp = rsp;
 378        }
 379
 380        /*
 381         * Make sure we record at least the request and response queue zero in
 382         * case we need to free them if part of the probe fails.
 383         */
 384        ha->rsp_q_map[0] = rsp;
 385        ha->req_q_map[0] = req;
 386        set_bit(0, ha->rsp_qid_map);
 387        set_bit(0, ha->req_qid_map);
 388        return 1;
 389
 390fail_base_qpair:
 391        kfree(ha->queue_pair_map);
 392fail_qpair_map:
 393        kfree(ha->rsp_q_map);
 394        ha->rsp_q_map = NULL;
 395fail_rsp_map:
 396        kfree(ha->req_q_map);
 397        ha->req_q_map = NULL;
 398fail_req_map:
 399        return -ENOMEM;
 400}
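    /*
     * Added commentary: qla2x00_alloc_queues() unwinds in reverse order of
     * allocation -- a failed base_qpair allocation frees queue_pair_map, a
     * failed queue_pair_map frees rsp_q_map, and so on -- so each goto
     * label only releases what was successfully allocated before it.  Note
     * the asymmetric return values: 1 on success, -ENOMEM on failure.
     */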
 401
 402static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
 403{
 404        if (IS_QLAFX00(ha)) {
 405                if (req && req->ring_fx00)
 406                        dma_free_coherent(&ha->pdev->dev,
 407                            (req->length_fx00 + 1) * sizeof(request_t),
 408                            req->ring_fx00, req->dma_fx00);
 409        } else if (req && req->ring)
 410                dma_free_coherent(&ha->pdev->dev,
 411                (req->length + 1) * sizeof(request_t),
 412                req->ring, req->dma);
 413
 414        if (req)
 415                kfree(req->outstanding_cmds);
 416
 417        kfree(req);
 418        req = NULL;
 419}
 420
 421static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
 422{
 423        if (IS_QLAFX00(ha)) {
 424                if (rsp && rsp->ring)
 425                        dma_free_coherent(&ha->pdev->dev,
 426                            (rsp->length_fx00 + 1) * sizeof(request_t),
 427                            rsp->ring_fx00, rsp->dma_fx00);
 428        } else if (rsp && rsp->ring) {
 429                dma_free_coherent(&ha->pdev->dev,
 430                (rsp->length + 1) * sizeof(response_t),
 431                rsp->ring, rsp->dma);
 432        }
 433        kfree(rsp);
 434        rsp = NULL;
 435}
 436
 437static void qla2x00_free_queues(struct qla_hw_data *ha)
 438{
 439        struct req_que *req;
 440        struct rsp_que *rsp;
 441        int cnt;
 442        unsigned long flags;
 443
 444        spin_lock_irqsave(&ha->hardware_lock, flags);
 445        for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
 446                if (!test_bit(cnt, ha->req_qid_map))
 447                        continue;
 448
 449                req = ha->req_q_map[cnt];
 450                clear_bit(cnt, ha->req_qid_map);
 451                ha->req_q_map[cnt] = NULL;
 452
 453                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 454                qla2x00_free_req_que(ha, req);
 455                spin_lock_irqsave(&ha->hardware_lock, flags);
 456        }
 457        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 458
 459        kfree(ha->req_q_map);
 460        ha->req_q_map = NULL;
 461
 462
 463        spin_lock_irqsave(&ha->hardware_lock, flags);
 464        for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
 465                if (!test_bit(cnt, ha->rsp_qid_map))
 466                        continue;
 467
 468                rsp = ha->rsp_q_map[cnt];
 469                clear_bit(cnt, ha->rsp_qid_map);
 470                ha->rsp_q_map[cnt] =  NULL;
 471                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 472                qla2x00_free_rsp_que(ha, rsp);
 473                spin_lock_irqsave(&ha->hardware_lock, flags);
 474        }
 475        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 476
 477        kfree(ha->rsp_q_map);
 478        ha->rsp_q_map = NULL;
 479}
 480
 481static char *
 482qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
 483{
 484        struct qla_hw_data *ha = vha->hw;
 485        static char *pci_bus_modes[] = {
 486                "33", "66", "100", "133",
 487        };
 488        uint16_t pci_bus;
 489
 490        strcpy(str, "PCI");
 491        pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
 492        if (pci_bus) {
 493                strcat(str, "-X (");
 494                strcat(str, pci_bus_modes[pci_bus]);
 495        } else {
 496                pci_bus = (ha->pci_attr & BIT_8) >> 8;
 497                strcat(str, " (");
 498                strcat(str, pci_bus_modes[pci_bus]);
 499        }
 500        strcat(str, " MHz)");
 501
 502        return (str);
 503}
 504
 505static char *
 506qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
 507{
 508        static char *pci_bus_modes[] = { "33", "66", "100", "133", };
 509        struct qla_hw_data *ha = vha->hw;
 510        uint32_t pci_bus;
 511
 512        if (pci_is_pcie(ha->pdev)) {
 513                char lwstr[6];
 514                uint32_t lstat, lspeed, lwidth;
 515
 516                pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
 517                lspeed = lstat & PCI_EXP_LNKCAP_SLS;
 518                lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
 519
 520                strcpy(str, "PCIe (");
 521                switch (lspeed) {
 522                case 1:
 523                        strcat(str, "2.5GT/s ");
 524                        break;
 525                case 2:
 526                        strcat(str, "5.0GT/s ");
 527                        break;
 528                case 3:
 529                        strcat(str, "8.0GT/s ");
 530                        break;
 531                default:
 532                        strcat(str, "<unknown> ");
 533                        break;
 534                }
 535                snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
 536                strcat(str, lwstr);
 537
 538                return str;
 539        }
 540
 541        strcpy(str, "PCI");
 542        pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
 543        if (pci_bus == 0 || pci_bus == 8) {
 544                strcat(str, " (");
 545                strcat(str, pci_bus_modes[pci_bus >> 3]);
 546        } else {
 547                strcat(str, "-X ");
 548                if (pci_bus & BIT_2)
 549                        strcat(str, "Mode 2");
 550                else
 551                        strcat(str, "Mode 1");
 552                strcat(str, " (");
 553                strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
 554        }
 555        strcat(str, " MHz)");
 556
 557        return str;
 558}
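    /*
     * Added commentary: for a PCIe adapter the string built above reads
     * e.g. "PCIe (8.0GT/s x8)" (illustrative values); for a PCI-X part it
     * ends up as e.g. "PCI-X Mode 1 (100 MHz)".  lwstr[6] is just large
     * enough for a width string such as "x16)" plus the terminating NUL.
     */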
 559
 560static char *
 561qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
 562{
 563        char un_str[10];
 564        struct qla_hw_data *ha = vha->hw;
 565
 566        snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
 567            ha->fw_minor_version, ha->fw_subminor_version);
 568
 569        if (ha->fw_attributes & BIT_9) {
 570                strcat(str, "FLX");
 571                return (str);
 572        }
 573
 574        switch (ha->fw_attributes & 0xFF) {
 575        case 0x7:
 576                strcat(str, "EF");
 577                break;
 578        case 0x17:
 579                strcat(str, "TP");
 580                break;
 581        case 0x37:
 582                strcat(str, "IP");
 583                break;
 584        case 0x77:
 585                strcat(str, "VI");
 586                break;
 587        default:
 588                sprintf(un_str, "(%x)", ha->fw_attributes);
 589                strcat(str, un_str);
 590                break;
 591        }
 592        if (ha->fw_attributes & 0x100)
 593                strcat(str, "X");
 594
 595        return (str);
 596}
 597
 598static char *
 599qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
 600{
 601        struct qla_hw_data *ha = vha->hw;
 602
 603        snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
 604            ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
 605        return str;
 606}
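    /*
     * Added commentary: the resulting version string has the form
     * "major.minor.subminor (attributes)", e.g. "8.07.00 (d0)" with
     * illustrative values; the attribute word is printed in raw hex.
     */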
 607
 608void
 609qla2x00_sp_free_dma(void *vha, void *ptr)
 610{
 611        srb_t *sp = (srb_t *)ptr;
 612        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 613        struct qla_hw_data *ha = sp->fcport->vha->hw;
 614        void *ctx = GET_CMD_CTX_SP(sp);
 615
 616        if (sp->flags & SRB_DMA_VALID) {
 617                scsi_dma_unmap(cmd);
 618                sp->flags &= ~SRB_DMA_VALID;
 619        }
 620
 621        if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
 622                dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
 623                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
 624                sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
 625        }
 626
 627        if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
 628                /* The list is guaranteed to have elements. */
 629                qla2x00_clean_dsd_pool(ha, sp, NULL);
 630                sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
 631        }
 632
 633        if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
 634                dma_pool_free(ha->dl_dma_pool, ctx,
 635                    ((struct crc_context *)ctx)->crc_ctx_dma);
 636                sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
 637        }
 638
 639        if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
 640                struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
 641
 642                dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
 643                        ctx1->fcp_cmnd_dma);
 644                list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
 645                ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
 646                ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
 647                mempool_free(ctx1, ha->ctx_mempool);
 648                ctx1 = NULL;
 649        }
 650
 651        CMD_SP(cmd) = NULL;
 652        qla2x00_rel_sp(sp->fcport->vha, sp);
 653}
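    /*
     * Added commentary: qla2x00_sp_free_dma() tears down exactly the
     * resources whose SRB_* flag is set on the sp -- data s/g DMA, DIF
     * protection s/g DMA, the DSD list, the CRC context and, for CT6
     * commands, the FCP_CMND buffer and its DSD accounting -- clearing
     * each flag as it goes, then detaches the sp from the scsi_cmnd and
     * returns it to the sp pool.
     */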
 654
 655void
 656qla2x00_sp_compl(void *data, void *ptr, int res)
 657{
 658        struct qla_hw_data *ha = (struct qla_hw_data *)data;
 659        srb_t *sp = (srb_t *)ptr;
 660        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 661
 662        cmd->result = res;
 663
 664        if (atomic_read(&sp->ref_count) == 0) {
 665                ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
 666                    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
 667                    sp, GET_CMD_SP(sp));
 668                if (ql2xextended_error_logging & ql_dbg_io)
 669                        WARN_ON(atomic_read(&sp->ref_count) == 0);
 670                return;
 671        }
 672        if (!atomic_dec_and_test(&sp->ref_count))
 673                return;
 674
 675        qla2x00_sp_free_dma(ha, sp);
 676        cmd->scsi_done(cmd);
 677}
 678
 679void
 680qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
 681{
 682        srb_t *sp = (srb_t *)ptr;
 683        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 684        struct qla_hw_data *ha = sp->fcport->vha->hw;
 685        void *ctx = GET_CMD_CTX_SP(sp);
 686
 687        if (sp->flags & SRB_DMA_VALID) {
 688                scsi_dma_unmap(cmd);
 689                sp->flags &= ~SRB_DMA_VALID;
 690        }
 691
 692        if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
 693                dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
 694                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
 695                sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
 696        }
 697
 698        if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
 699                /* The list is guaranteed to have elements. */
 700                qla2x00_clean_dsd_pool(ha, sp, NULL);
 701                sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
 702        }
 703
 704        if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
 705                dma_pool_free(ha->dl_dma_pool, ctx,
 706                    ((struct crc_context *)ctx)->crc_ctx_dma);
 707                sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
 708        }
 709
 710        if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
 711                struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
 712
 713                dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
 714                    ctx1->fcp_cmnd_dma);
 715                list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
 716                ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
 717                ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
 718                mempool_free(ctx1, ha->ctx_mempool);
 719        }
 720
 721        CMD_SP(cmd) = NULL;
 722        qla2xxx_rel_qpair_sp(sp->qpair, sp);
 723}
 724
 725void
 726qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
 727{
 728        srb_t *sp = (srb_t *)ptr;
 729        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 730
 731        cmd->result = res;
 732
 733        if (atomic_read(&sp->ref_count) == 0) {
 734                ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
 735                    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
 736                    sp, GET_CMD_SP(sp));
 737                if (ql2xextended_error_logging & ql_dbg_io)
 738                        WARN_ON(atomic_read(&sp->ref_count) == 0);
 739                return;
 740        }
 741        if (!atomic_dec_and_test(&sp->ref_count))
 742                return;
 743
 744        qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
 745        cmd->scsi_done(cmd);
 746}
 747
 748/* If we are SP1 here, we need to still take and release the host_lock as SP1
 749 * does not have the changes necessary to avoid taking host->host_lock.
 750 */
 751static int
 752qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 753{
 754        scsi_qla_host_t *vha = shost_priv(host);
 755        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 756        struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
 757        struct qla_hw_data *ha = vha->hw;
 758        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 759        srb_t *sp;
 760        int rval;
 761        struct qla_qpair *qpair = NULL;
 762        uint32_t tag;
 763        uint16_t hwq;
 764
 765        if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
 766                cmd->result = DID_NO_CONNECT << 16;
 767                goto qc24_fail_command;
 768        }
 769
 770        if (ha->mqenable) {
 771                if (shost_use_blk_mq(vha->host)) {
 772                        tag = blk_mq_unique_tag(cmd->request);
 773                        hwq = blk_mq_unique_tag_to_hwq(tag);
 774                        qpair = ha->queue_pair_map[hwq];
 775                } else if (vha->vp_idx && vha->qpair) {
 776                        qpair = vha->qpair;
 777                }
 778
 779                if (qpair)
 780                        return qla2xxx_mqueuecommand(host, cmd, qpair);
 781        }
 782
 783        if (ha->flags.eeh_busy) {
 784                if (ha->flags.pci_channel_io_perm_failure) {
 785                        ql_dbg(ql_dbg_aer, vha, 0x9010,
 786                            "PCI Channel IO permanent failure, exiting "
 787                            "cmd=%p.\n", cmd);
 788                        cmd->result = DID_NO_CONNECT << 16;
 789                } else {
 790                        ql_dbg(ql_dbg_aer, vha, 0x9011,
 791                            "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
 792                        cmd->result = DID_REQUEUE << 16;
 793                }
 794                goto qc24_fail_command;
 795        }
 796
 797        rval = fc_remote_port_chkready(rport);
 798        if (rval) {
 799                cmd->result = rval;
 800                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
 801                    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
 802                    cmd, rval);
 803                goto qc24_fail_command;
 804        }
 805
 806        if (!vha->flags.difdix_supported &&
 807                scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
 808                        ql_dbg(ql_dbg_io, vha, 0x3004,
 809                            "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
 810                            cmd);
 811                        cmd->result = DID_NO_CONNECT << 16;
 812                        goto qc24_fail_command;
 813        }
 814
 815        if (!fcport) {
 816                cmd->result = DID_NO_CONNECT << 16;
 817                goto qc24_fail_command;
 818        }
 819
 820        if (atomic_read(&fcport->state) != FCS_ONLINE) {
 821                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
 822                        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 823                        ql_dbg(ql_dbg_io, vha, 0x3005,
 824                            "Returning DNC, fcport_state=%d loop_state=%d.\n",
 825                            atomic_read(&fcport->state),
 826                            atomic_read(&base_vha->loop_state));
 827                        cmd->result = DID_NO_CONNECT << 16;
 828                        goto qc24_fail_command;
 829                }
 830                goto qc24_target_busy;
 831        }
 832
 833        /*
 834         * Return target busy if we've received a non-zero retry_delay_timer
 835         * in a FCP_RSP.
 836         */
 837        if (fcport->retry_delay_timestamp == 0) {
 838                /* retry delay not set */
 839        } else if (time_after(jiffies, fcport->retry_delay_timestamp))
 840                fcport->retry_delay_timestamp = 0;
 841        else
 842                goto qc24_target_busy;
 843
 844        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
 845        if (!sp)
 846                goto qc24_host_busy;
 847
 848        sp->u.scmd.cmd = cmd;
 849        sp->type = SRB_SCSI_CMD;
 850        atomic_set(&sp->ref_count, 1);
 851        CMD_SP(cmd) = (void *)sp;
 852        sp->free = qla2x00_sp_free_dma;
 853        sp->done = qla2x00_sp_compl;
 854
 855        rval = ha->isp_ops->start_scsi(sp);
 856        if (rval != QLA_SUCCESS) {
 857                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
 858                    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
 859                goto qc24_host_busy_free_sp;
 860        }
 861
 862        return 0;
 863
 864qc24_host_busy_free_sp:
 865        qla2x00_sp_free_dma(ha, sp);
 866
 867qc24_host_busy:
 868        return SCSI_MLQUEUE_HOST_BUSY;
 869
 870qc24_target_busy:
 871        return SCSI_MLQUEUE_TARGET_BUSY;
 872
 873qc24_fail_command:
 874        cmd->scsi_done(cmd);
 875
 876        return 0;
 877}
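    /*
     * Sketch (added commentary): when blk-mq is in use, the hardware queue
     * for a command is recovered from the block layer tag, roughly:
     *
     *   u32 tag = blk_mq_unique_tag(cmd->request);
     *   u16 hwq = blk_mq_unique_tag_to_hwq(tag);
     *   struct qla_qpair *qpair = ha->queue_pair_map[hwq];
     *
     * which is the lookup performed above before handing the command to
     * qla2xxx_mqueuecommand(); non-MQ and virtual-port traffic keeps using
     * the legacy single-queue path in this function.
     */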
 878
 879/* For MQ supported I/O */
 880int
 881qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
 882    struct qla_qpair *qpair)
 883{
 884        scsi_qla_host_t *vha = shost_priv(host);
 885        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 886        struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
 887        struct qla_hw_data *ha = vha->hw;
 888        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 889        srb_t *sp;
 890        int rval;
 891
 892        rval = fc_remote_port_chkready(rport);
 893        if (rval) {
 894                cmd->result = rval;
 895                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
 896                    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
 897                    cmd, rval);
 898                goto qc24_fail_command;
 899        }
 900
 901        if (!fcport) {
 902                cmd->result = DID_NO_CONNECT << 16;
 903                goto qc24_fail_command;
 904        }
 905
 906        if (atomic_read(&fcport->state) != FCS_ONLINE) {
 907                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
 908                        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 909                        ql_dbg(ql_dbg_io, vha, 0x3077,
 910                            "Returning DNC, fcport_state=%d loop_state=%d.\n",
 911                            atomic_read(&fcport->state),
 912                            atomic_read(&base_vha->loop_state));
 913                        cmd->result = DID_NO_CONNECT << 16;
 914                        goto qc24_fail_command;
 915                }
 916                goto qc24_target_busy;
 917        }
 918
 919        /*
 920         * Return target busy if we've received a non-zero retry_delay_timer
 921         * in a FCP_RSP.
 922         */
 923        if (fcport->retry_delay_timestamp == 0) {
 924                /* retry delay not set */
 925        } else if (time_after(jiffies, fcport->retry_delay_timestamp))
 926                fcport->retry_delay_timestamp = 0;
 927        else
 928                goto qc24_target_busy;
 929
 930        sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
 931        if (!sp)
 932                goto qc24_host_busy;
 933
 934        sp->u.scmd.cmd = cmd;
 935        sp->type = SRB_SCSI_CMD;
 936        atomic_set(&sp->ref_count, 1);
 937        CMD_SP(cmd) = (void *)sp;
 938        sp->free = qla2xxx_qpair_sp_free_dma;
 939        sp->done = qla2xxx_qpair_sp_compl;
 940        sp->qpair = qpair;
 941
 942        rval = ha->isp_ops->start_scsi_mq(sp);
 943        if (rval != QLA_SUCCESS) {
 944                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
 945                    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
 946                if (rval == QLA_INTERFACE_ERROR)
 947                        goto qc24_fail_command;
 948                goto qc24_host_busy_free_sp;
 949        }
 950
 951        return 0;
 952
 953qc24_host_busy_free_sp:
 954        qla2xxx_qpair_sp_free_dma(vha, sp);
 955
 956qc24_host_busy:
 957        return SCSI_MLQUEUE_HOST_BUSY;
 958
 959qc24_target_busy:
 960        return SCSI_MLQUEUE_TARGET_BUSY;
 961
 962qc24_fail_command:
 963        cmd->scsi_done(cmd);
 964
 965        return 0;
 966}
 967
 968/*
 969 * qla2x00_eh_wait_on_command
 970 *    Waits for the command to be returned by the firmware, up to a
 971 *    maximum polling time.
 972 *
 973 * Input:
 974 *    cmd = Scsi Command to wait on.
 975 *
 976 * Return:
 977 *    Not Found : 0
 978 *    Found : 1
 979 */
 980static int
 981qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
 982{
 983#define ABORT_POLLING_PERIOD    1000
 984#define ABORT_WAIT_ITER         ((2 * 1000) / (ABORT_POLLING_PERIOD))
 985        unsigned long wait_iter = ABORT_WAIT_ITER;
 986        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 987        struct qla_hw_data *ha = vha->hw;
 988        int ret = QLA_SUCCESS;
 989
 990        if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
 991                ql_dbg(ql_dbg_taskm, vha, 0x8005,
 992                    "Return:eh_wait.\n");
 993                return ret;
 994        }
 995
 996        while (CMD_SP(cmd) && wait_iter--) {
 997                msleep(ABORT_POLLING_PERIOD);
 998        }
 999        if (CMD_SP(cmd))
1000                ret = QLA_FUNCTION_FAILED;
1001
1002        return ret;
1003}
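    /*
     * Added commentary: with ABORT_POLLING_PERIOD = 1000 ms and
     * ABORT_WAIT_ITER = (2 * 1000) / 1000 = 2, the loop above polls
     * CMD_SP(cmd) at most twice, i.e. the abort path waits roughly two
     * seconds for the firmware to return the command before reporting
     * QLA_FUNCTION_FAILED.
     */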
1004
1005/*
1006 * qla2x00_wait_for_hba_online
1007 *    Wait until the HBA is online (after going through at most
1008 *    MAX_RETRIES_OF_ISP_ABORT retries) or the HBA is finally
1009 *    disabled, i.e. marked offline.
1010 *
1011 * Input:
1012 *     ha - pointer to host adapter structure
1013 *
1014 * Note:
1015 *    This routine sleeps (does context switching); release any
1016 *    spinlock held before calling it.
1017 *
1018 * Return:
1019 *    Success (Adapter is online) : 0
1020 *    Failed  (Adapter is offline/disabled) : 1
1021 */
1022int
1023qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
1024{
1025        int             return_status;
1026        unsigned long   wait_online;
1027        struct qla_hw_data *ha = vha->hw;
1028        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1029
1030        wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
1031        while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
1032            test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
1033            test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
1034            ha->dpc_active) && time_before(jiffies, wait_online)) {
1035
1036                msleep(1000);
1037        }
1038        if (base_vha->flags.online)
1039                return_status = QLA_SUCCESS;
1040        else
1041                return_status = QLA_FUNCTION_FAILED;
1042
1043        return (return_status);
1044}
1045
1046/*
1047 * qla2x00_wait_for_hba_ready
1048 * Wait until the HBA is ready before doing driver unload.
1049 *
1050 * Input:
1051 *     ha - pointer to host adapter structure
1052 *
1053 * Note:
1054 *    This routine sleeps (does context switching); release any
1055 *    spinlock held before calling it.
1056 *
1057 */
1058static void
1059qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
1060{
1061        struct qla_hw_data *ha = vha->hw;
1062        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1063
1064        while ((qla2x00_reset_active(vha) || ha->dpc_active ||
1065                ha->flags.mbox_busy) ||
1066               test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
1067               test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
1068                if (test_bit(UNLOADING, &base_vha->dpc_flags))
1069                        break;
1070                msleep(1000);
1071        }
1072}
1073
1074int
1075qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
1076{
1077        int             return_status;
1078        unsigned long   wait_reset;
1079        struct qla_hw_data *ha = vha->hw;
1080        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1081
1082        wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
1083        while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
1084            test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
1085            test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
1086            ha->dpc_active) && time_before(jiffies, wait_reset)) {
1087
1088                msleep(1000);
1089
1090                if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1091                    ha->flags.chip_reset_done)
1092                        break;
1093        }
1094        if (ha->flags.chip_reset_done)
1095                return_status = QLA_SUCCESS;
1096        else
1097                return_status = QLA_FUNCTION_FAILED;
1098
1099        return return_status;
1100}
1101
1102static void
1103sp_get(struct srb *sp)
1104{
1105        atomic_inc(&sp->ref_count);
1106}
1107
1108#define ISP_REG_DISCONNECT 0xffffffffU
1109/**************************************************************************
1110* qla2x00_isp_reg_stat
1111*
1112* Description:
1113*       Read the host status register of ISP before aborting the command.
1114*
1115* Input:
1116*       ha = pointer to host adapter structure.
1117*
1118*
1119* Returns:
1120*       Either true or false.
1121*
1122* Note: Returns true if there is a register disconnect.
1123**************************************************************************/
1124static inline
1125uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
1126{
1127        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1128
1129        return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
1130}
1131
1132/**************************************************************************
1133* qla2xxx_eh_abort
1134*
1135* Description:
1136*    The abort function will abort the specified command.
1137*
1138* Input:
1139*    cmd = Linux SCSI command packet to be aborted.
1140*
1141* Returns:
1142*    Either SUCCESS or FAILED.
1143*
1144* Note:
1145*    Only return FAILED if command not returned by firmware.
1146**************************************************************************/
1147static int
1148qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1149{
1150        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1151        srb_t *sp;
1152        int ret;
1153        unsigned int id;
1154        uint64_t lun;
1155        unsigned long flags;
1156        int rval, wait = 0;
1157        struct qla_hw_data *ha = vha->hw;
1158
1159        if (qla2x00_isp_reg_stat(ha)) {
1160                ql_log(ql_log_info, vha, 0x8042,
1161                    "PCI/Register disconnect, exiting.\n");
1162                return FAILED;
1163        }
1164        if (!CMD_SP(cmd))
1165                return SUCCESS;
1166
1167        ret = fc_block_scsi_eh(cmd);
1168        if (ret != 0)
1169                return ret;
1170        ret = SUCCESS;
1171
1172        id = cmd->device->id;
1173        lun = cmd->device->lun;
1174
1175        spin_lock_irqsave(&ha->hardware_lock, flags);
1176        sp = (srb_t *) CMD_SP(cmd);
1177        if (!sp) {
1178                spin_unlock_irqrestore(&ha->hardware_lock, flags);
1179                return SUCCESS;
1180        }
1181
1182        ql_dbg(ql_dbg_taskm, vha, 0x8002,
1183            "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
1184            vha->host_no, id, lun, sp, cmd, sp->handle);
1185
1186        /* Get a reference to the sp and drop the lock.*/
1187        sp_get(sp);
1188
1189        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1190        rval = ha->isp_ops->abort_command(sp);
1191        if (rval) {
1192                if (rval == QLA_FUNCTION_PARAMETER_ERROR)
1193                        ret = SUCCESS;
1194                else
1195                        ret = FAILED;
1196
1197                ql_dbg(ql_dbg_taskm, vha, 0x8003,
1198                    "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
1199        } else {
1200                ql_dbg(ql_dbg_taskm, vha, 0x8004,
1201                    "Abort command mbx success cmd=%p.\n", cmd);
1202                wait = 1;
1203        }
1204
1205        spin_lock_irqsave(&ha->hardware_lock, flags);
1206        sp->done(ha, sp, 0);
1207        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1208
1209        /* Did the command return during mailbox execution? */
1210        if (ret == FAILED && !CMD_SP(cmd))
1211                ret = SUCCESS;
1212
1213        /* Wait for the command to be returned. */
1214        if (wait) {
1215                if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
1216                        ql_log(ql_log_warn, vha, 0x8006,
1217                            "Abort handler timed out cmd=%p.\n", cmd);
1218                        ret = FAILED;
1219                }
1220        }
1221
1222        ql_log(ql_log_info, vha, 0x801c,
1223            "Abort command issued nexus=%ld:%d:%llu --  %d %x.\n",
1224            vha->host_no, id, lun, wait, ret);
1225
1226        return ret;
1227}
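    /*
     * Added commentary: sp_get() takes an extra reference before the
     * hardware lock is dropped so the srb cannot be freed while the abort
     * mailbox command is outstanding; the later sp->done(ha, sp, 0) call
     * drops that reference and completes the command if it was the last
     * holder.
     */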
1228
1229int
1230qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
1231        uint64_t l, enum nexus_wait_type type)
1232{
1233        int cnt, match, status;
1234        unsigned long flags;
1235        struct qla_hw_data *ha = vha->hw;
1236        struct req_que *req;
1237        srb_t *sp;
1238        struct scsi_cmnd *cmd;
1239
1240        status = QLA_SUCCESS;
1241
1242        spin_lock_irqsave(&ha->hardware_lock, flags);
1243        req = vha->req;
1244        for (cnt = 1; status == QLA_SUCCESS &&
1245                cnt < req->num_outstanding_cmds; cnt++) {
1246                sp = req->outstanding_cmds[cnt];
1247                if (!sp)
1248                        continue;
1249                if (sp->type != SRB_SCSI_CMD)
1250                        continue;
1251                if (vha->vp_idx != sp->fcport->vha->vp_idx)
1252                        continue;
1253                match = 0;
1254                cmd = GET_CMD_SP(sp);
1255                switch (type) {
1256                case WAIT_HOST:
1257                        match = 1;
1258                        break;
1259                case WAIT_TARGET:
1260                        match = cmd->device->id == t;
1261                        break;
1262                case WAIT_LUN:
1263                        match = (cmd->device->id == t &&
1264                                cmd->device->lun == l);
1265                        break;
1266                }
1267                if (!match)
1268                        continue;
1269
1270                spin_unlock_irqrestore(&ha->hardware_lock, flags);
1271                status = qla2x00_eh_wait_on_command(cmd);
1272                spin_lock_irqsave(&ha->hardware_lock, flags);
1273        }
1274        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1275
1276        return status;
1277}
1278
1279static char *reset_errors[] = {
1280        "HBA not online",
1281        "HBA not ready",
1282        "Task management failed",
1283        "Waiting for command completions",
1284};
1285
1286static int
1287__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
1288    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
1289{
1290        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1291        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1292        int err;
1293
1294        if (!fcport) {
1295                return FAILED;
1296        }
1297
1298        err = fc_block_scsi_eh(cmd);
1299        if (err != 0)
1300                return err;
1301
1302        ql_log(ql_log_info, vha, 0x8009,
1303            "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
1304            cmd->device->id, cmd->device->lun, cmd);
1305
1306        err = 0;
1307        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1308                ql_log(ql_log_warn, vha, 0x800a,
1309                    "Wait for hba online failed for cmd=%p.\n", cmd);
1310                goto eh_reset_failed;
1311        }
1312        err = 2;
1313        if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
1314                != QLA_SUCCESS) {
1315                ql_log(ql_log_warn, vha, 0x800c,
1316                    "do_reset failed for cmd=%p.\n", cmd);
1317                goto eh_reset_failed;
1318        }
1319        err = 3;
1320        if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
1321            cmd->device->lun, type) != QLA_SUCCESS) {
1322                ql_log(ql_log_warn, vha, 0x800d,
1323                    "wait for pending cmds failed for cmd=%p.\n", cmd);
1324                goto eh_reset_failed;
1325        }
1326
1327        ql_log(ql_log_info, vha, 0x800e,
1328            "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
1329            vha->host_no, cmd->device->id, cmd->device->lun, cmd);
1330
1331        return SUCCESS;
1332
1333eh_reset_failed:
1334        ql_log(ql_log_info, vha, 0x800f,
1335            "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
1336            reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
1337            cmd);
1338        return FAILED;
1339}
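    /*
     * Added commentary: the err index set above selects the failure string
     * from reset_errors[]: 0 = "HBA not online", 2 = "Task management
     * failed", 3 = "Waiting for command completions".  Index 1 ("HBA not
     * ready") is defined but not reached on the current failure paths.
     */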
1340
1341static int
1342qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
1343{
1344        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1345        struct qla_hw_data *ha = vha->hw;
1346
1347        if (qla2x00_isp_reg_stat(ha)) {
1348                ql_log(ql_log_info, vha, 0x803e,
1349                    "PCI/Register disconnect, exiting.\n");
1350                return FAILED;
1351        }
1352
1353        return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
1354            ha->isp_ops->lun_reset);
1355}
1356
1357static int
1358qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
1359{
1360        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1361        struct qla_hw_data *ha = vha->hw;
1362
1363        if (qla2x00_isp_reg_stat(ha)) {
1364                ql_log(ql_log_info, vha, 0x803f,
1365                    "PCI/Register disconnect, exiting.\n");
1366                return FAILED;
1367        }
1368
1369        return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
1370            ha->isp_ops->target_reset);
1371}
1372
1373/**************************************************************************
1374* qla2xxx_eh_bus_reset
1375*
1376* Description:
1377*    The bus reset function will reset the bus and abort any executing
1378*    commands.
1379*
1380* Input:
1381*    cmd = Linux SCSI command packet of the command that caused the
1382*          bus reset.
1383*
1384* Returns:
1385*    SUCCESS/FAILURE (defined as macro in scsi.h).
1386*
1387**************************************************************************/
1388static int
1389qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1390{
1391        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1392        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1393        int ret = FAILED;
1394        unsigned int id;
1395        uint64_t lun;
1396        struct qla_hw_data *ha = vha->hw;
1397
1398        if (qla2x00_isp_reg_stat(ha)) {
1399                ql_log(ql_log_info, vha, 0x8040,
1400                    "PCI/Register disconnect, exiting.\n");
1401                return FAILED;
1402        }
1403
1404        id = cmd->device->id;
1405        lun = cmd->device->lun;
1406
1407        if (!fcport) {
1408                return ret;
1409        }
1410
1411        ret = fc_block_scsi_eh(cmd);
1412        if (ret != 0)
1413                return ret;
1414        ret = FAILED;
1415
1416        ql_log(ql_log_info, vha, 0x8012,
1417            "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
1418
1419        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1420                ql_log(ql_log_fatal, vha, 0x8013,
1421                    "Wait for hba online failed board disabled.\n");
1422                goto eh_bus_reset_done;
1423        }
1424
1425        if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
1426                ret = SUCCESS;
1427
1428        if (ret == FAILED)
1429                goto eh_bus_reset_done;
1430
1431        /* Flush outstanding commands. */
1432        if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1433            QLA_SUCCESS) {
1434                ql_log(ql_log_warn, vha, 0x8014,
1435                    "Wait for pending commands failed.\n");
1436                ret = FAILED;
1437        }
1438
1439eh_bus_reset_done:
1440        ql_log(ql_log_warn, vha, 0x802b,
1441            "BUS RESET %s nexus=%ld:%d:%llu.\n",
1442            (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
1443
1444        return ret;
1445}
1446
1447/**************************************************************************
1448* qla2xxx_eh_host_reset
1449*
1450* Description:
1451*    The reset function will reset the Adapter.
1452*
1453* Input:
1454*      cmd = Linux SCSI command packet of the command that caused the
1455*            adapter reset.
1456*
1457* Returns:
1458*      Either SUCCESS or FAILED.
1459*
1460* Note:
1461**************************************************************************/
1462static int
1463qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1464{
1465        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1466        struct qla_hw_data *ha = vha->hw;
1467        int ret = FAILED;
1468        unsigned int id;
1469        uint64_t lun;
1470        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1471
1472        if (qla2x00_isp_reg_stat(ha)) {
1473                ql_log(ql_log_info, vha, 0x8041,
1474                    "PCI/Register disconnect, exiting.\n");
1475                schedule_work(&ha->board_disable);
1476                return SUCCESS;
1477        }
1478
1479        id = cmd->device->id;
1480        lun = cmd->device->lun;
1481
1482        ql_log(ql_log_info, vha, 0x8018,
1483            "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
1484
1485        /*
1486         * No point in issuing another reset if one is active.  Also do not
1487         * attempt a reset if we are updating flash.
1488         */
1489        if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
1490                goto eh_host_reset_lock;
1491
1492        if (vha != base_vha) {
1493                if (qla2x00_vp_abort_isp(vha))
1494                        goto eh_host_reset_lock;
1495        } else {
1496                if (IS_P3P_TYPE(vha->hw)) {
1497                        if (!qla82xx_fcoe_ctx_reset(vha)) {
1498                                /* Ctx reset success */
1499                                ret = SUCCESS;
1500                                goto eh_host_reset_lock;
1501                        }
1502                        /* fall thru if ctx reset failed */
1503                }
1504                if (ha->wq)
1505                        flush_workqueue(ha->wq);
1506
1507                set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1508                if (ha->isp_ops->abort_isp(base_vha)) {
1509                        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1510                        /* failed. schedule dpc to try */
1511                        set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1512
1513                        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1514                                ql_log(ql_log_warn, vha, 0x802a,
1515                                    "wait for hba online failed.\n");
1516                                goto eh_host_reset_lock;
1517                        }
1518                }
1519                clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1520        }
1521
1522        /* Wait for outstanding commands to be returned to the OS. */
1523        if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
1524                QLA_SUCCESS)
1525                ret = SUCCESS;
1526
1527eh_host_reset_lock:
1528        ql_log(ql_log_info, vha, 0x8017,
1529            "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
1530            (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
1531
1532        return ret;
1533}
1534
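/*
 * The error-handler entry points in this file (command abort, the bus reset
 * above, and the adapter reset above) only take effect once the SCSI
 * midlayer finds them in the driver's scsi_host_template.  A minimal sketch
 * of that hookup, assuming the template defined elsewhere in this file:
 *
 *	static struct scsi_host_template qla2xxx_driver_template = {
 *		...
 *		.eh_abort_handler	= qla2xxx_eh_abort,
 *		.eh_bus_reset_handler	= qla2xxx_eh_bus_reset,
 *		.eh_host_reset_handler	= qla2xxx_eh_host_reset,
 *		...
 *	};
 */
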
1535/*
1536* qla2x00_loop_reset
1537*      Issue loop reset.
1538*
1539* Input:
1540*      vha = adapter block pointer.
1541*
1542* Returns:
1543*      0 = success
1544*/
1545int
1546qla2x00_loop_reset(scsi_qla_host_t *vha)
1547{
1548        int ret;
1549        struct fc_port *fcport;
1550        struct qla_hw_data *ha = vha->hw;
1551
1552        if (IS_QLAFX00(ha)) {
1553                return qlafx00_loop_reset(vha);
1554        }
1555
1556        if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
1557                list_for_each_entry(fcport, &vha->vp_fcports, list) {
1558                        if (fcport->port_type != FCT_TARGET)
1559                                continue;
1560
1561                        ret = ha->isp_ops->target_reset(fcport, 0, 0);
1562                        if (ret != QLA_SUCCESS) {
1563                                ql_dbg(ql_dbg_taskm, vha, 0x802c,
1564                                    "Target reset failed: Reset=%d "
1565                                    "d_id=%x.\n", ret, fcport->d_id.b24);
1566                        }
1567                }
1568        }
1569
1570
1571        if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
1572                atomic_set(&vha->loop_state, LOOP_DOWN);
1573                atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1574                qla2x00_mark_all_devices_lost(vha, 0);
1575                ret = qla2x00_full_login_lip(vha);
1576                if (ret != QLA_SUCCESS) {
1577                        ql_dbg(ql_dbg_taskm, vha, 0x802d,
1578                            "full_login_lip=%d.\n", ret);
1579                }
1580        }
1581
1582        if (ha->flags.enable_lip_reset) {
1583                ret = qla2x00_lip_reset(vha);
1584                if (ret != QLA_SUCCESS)
1585                        ql_dbg(ql_dbg_taskm, vha, 0x802e,
1586                            "lip_reset failed (%d).\n", ret);
1587        }
1588
1589        /* Issue marker command only when we are going to start the I/O */
1590        vha->marker_needed = 1;
1591
1592        return QLA_SUCCESS;
1593}
1594
1595void
1596qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1597{
1598        int que, cnt;
1599        unsigned long flags;
1600        srb_t *sp;
1601        struct qla_hw_data *ha = vha->hw;
1602        struct req_que *req;
1603
1604        qlt_host_reset_handler(ha);
1605
1606        spin_lock_irqsave(&ha->hardware_lock, flags);
1607        for (que = 0; que < ha->max_req_queues; que++) {
1608                req = ha->req_q_map[que];
1609                if (!req)
1610                        continue;
1611                if (!req->outstanding_cmds)
1612                        continue;
1613                for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
1614                        sp = req->outstanding_cmds[cnt];
1615                        if (sp) {
1616                                /* Don't abort commands in adapter during EEH
1617                                 * recovery as it's not accessible/responding.
1618                                 */
1619                                if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
1620                                        /* Get a reference to the sp and drop the lock.
1621                                         * The reference ensures this sp->done() call
1622                                         * - and not the call in qla2xxx_eh_abort() -
1623                                         * ends the SCSI command (with result 'res').
1624                                         */
1625                                        sp_get(sp);
1626                                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1627                                        qla2xxx_eh_abort(GET_CMD_SP(sp));
1628                                        spin_lock_irqsave(&ha->hardware_lock, flags);
1629                                }
1630                                req->outstanding_cmds[cnt] = NULL;
1631                                sp->done(vha, sp, res);
1632                        }
1633                }
1634        }
1635        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1636}
1637
1638static int
1639qla2xxx_slave_alloc(struct scsi_device *sdev)
1640{
1641        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1642
1643        if (!rport || fc_remote_port_chkready(rport))
1644                return -ENXIO;
1645
1646        sdev->hostdata = *(fc_port_t **)rport->dd_data;
1647
1648        return 0;
1649}
1650
1651static int
1652qla2xxx_slave_configure(struct scsi_device *sdev)
1653{
1654        scsi_qla_host_t *vha = shost_priv(sdev->host);
1655        struct req_que *req = vha->req;
1656
1657        if (IS_T10_PI_CAPABLE(vha->hw))
1658                blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1659
1660        scsi_change_queue_depth(sdev, req->max_q_depth);
1661        return 0;
1662}
1663
1664static void
1665qla2xxx_slave_destroy(struct scsi_device *sdev)
1666{
1667        sdev->hostdata = NULL;
1668}
1669
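/*
 * The three slave_* hooks above are consumed by the SCSI midlayer through
 * the .slave_alloc/.slave_configure/.slave_destroy members of the driver's
 * scsi_host_template (see the hookup sketch above).  slave_configure() is
 * also where the per-LUN queue depth is seeded from the request queue's
 * max_q_depth.
 */
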
1670/**
1671 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1672 * @ha: HA context
1673 *
1674 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
1675 * supported addressing method.
1676 */
1677static void
1678qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1679{
1680        /* Assume a 32bit DMA mask. */
1681        ha->flags.enable_64bit_addressing = 0;
1682
1683        if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1684                /* Any upper-dword bits set? */
1685                if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1686                    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
1687                        /* Ok, a 64bit DMA mask is applicable. */
1688                        ha->flags.enable_64bit_addressing = 1;
1689                        ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1690                        ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1691                        return;
1692                }
1693        }
1694
1695        dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1696        pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
1697}
1698
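/*
 * For reference: newer code usually collapses the streaming/coherent mask
 * negotiation above into a single call.  A minimal sketch of the same
 * policy (prefer 64-bit, fall back to 32-bit), not what this driver does
 * verbatim:
 *
 *	if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64)))
 *		dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
 *
 * The dma_get_required_mask() test above additionally keeps the driver on
 * the smaller 32-bit IOCB formats when the platform never needs addresses
 * above 4GB.
 */
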
1699static void
1700qla2x00_enable_intrs(struct qla_hw_data *ha)
1701{
1702        unsigned long flags = 0;
1703        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1704
1705        spin_lock_irqsave(&ha->hardware_lock, flags);
1706        ha->interrupts_on = 1;
1707        /* enable risc and host interrupts */
1708        WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1709        RD_REG_WORD(&reg->ictrl);
1710        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1711
1712}
1713
1714static void
1715qla2x00_disable_intrs(struct qla_hw_data *ha)
1716{
1717        unsigned long flags = 0;
1718        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1719
1720        spin_lock_irqsave(&ha->hardware_lock, flags);
1721        ha->interrupts_on = 0;
1722        /* disable risc and host interrupts */
1723        WRT_REG_WORD(&reg->ictrl, 0);
1724        RD_REG_WORD(&reg->ictrl);
1725        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1726}
1727
1728static void
1729qla24xx_enable_intrs(struct qla_hw_data *ha)
1730{
1731        unsigned long flags = 0;
1732        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1733
1734        spin_lock_irqsave(&ha->hardware_lock, flags);
1735        ha->interrupts_on = 1;
1736        WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1737        RD_REG_DWORD(&reg->ictrl);
1738        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1739}
1740
1741static void
1742qla24xx_disable_intrs(struct qla_hw_data *ha)
1743{
1744        unsigned long flags = 0;
1745        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1746
1747        if (IS_NOPOLLING_TYPE(ha))
1748                return;
1749        spin_lock_irqsave(&ha->hardware_lock, flags);
1750        ha->interrupts_on = 0;
1751        WRT_REG_DWORD(&reg->ictrl, 0);
1752        RD_REG_DWORD(&reg->ictrl);
1753        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1754}
1755
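/*
 * The two pairs of interrupt helpers above differ only in register layout:
 * the ISP2xxx parts use 16-bit accesses to the legacy ictrl register while
 * ISP24xx and later use the 32-bit ictrl in device_reg_24xx.  The rest of
 * the driver reaches the correct variant through
 * ha->isp_ops->enable_intrs()/disable_intrs().
 */
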
1756static int
1757qla2x00_iospace_config(struct qla_hw_data *ha)
1758{
1759        resource_size_t pio;
1760        uint16_t msix;
1761
1762        if (pci_request_selected_regions(ha->pdev, ha->bars,
1763            QLA2XXX_DRIVER_NAME)) {
1764                ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
1765                    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
1766                    pci_name(ha->pdev));
1767                goto iospace_error_exit;
1768        }
1769        if (!(ha->bars & 1))
1770                goto skip_pio;
1771
1772        /* We only need PIO for Flash operations on ISP2312 v2 chips. */
1773        pio = pci_resource_start(ha->pdev, 0);
1774        if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1775                if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1776                        ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
1777                            "Invalid pci I/O region size (%s).\n",
1778                            pci_name(ha->pdev));
1779                        pio = 0;
1780                }
1781        } else {
1782                ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
1783                    "Region #0 not a PIO resource (%s).\n",
1784                    pci_name(ha->pdev));
1785                pio = 0;
1786        }
1787        ha->pio_address = pio;
1788        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
1789            "PIO address=%llu.\n",
1790            (unsigned long long)ha->pio_address);
1791
1792skip_pio:
1793        /* Use MMIO operations for all accesses. */
1794        if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1795                ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
1796                    "Region #1 not an MMIO resource (%s), aborting.\n",
1797                    pci_name(ha->pdev));
1798                goto iospace_error_exit;
1799        }
1800        if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1801                ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
1802                    "Invalid PCI mem region size (%s), aborting.\n",
1803                    pci_name(ha->pdev));
1804                goto iospace_error_exit;
1805        }
1806
1807        ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1808        if (!ha->iobase) {
1809                ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
1810                    "Cannot remap MMIO (%s), aborting.\n",
1811                    pci_name(ha->pdev));
1812                goto iospace_error_exit;
1813        }
1814
1815        /* Determine queue resources */
1816        ha->max_req_queues = ha->max_rsp_queues = 1;
1817        if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1818                goto mqiobase_exit;
1819
1820        ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1821                        pci_resource_len(ha->pdev, 3));
1822        if (ha->mqiobase) {
1823                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
1824                    "MQIO Base=%p.\n", ha->mqiobase);
1825                /* Read MSIX vector size of the board */
1826                pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1827                ha->msix_count = msix + 1;
1828                /* Max queues are bounded by available msix vectors */
1829                /* MB interrupt uses 1 vector */
1830                ha->max_req_queues = ha->msix_count - 1;
1831                ha->max_rsp_queues = ha->max_req_queues;
1832                /* Queue pairs: the max value minus the base queue pair */
1833                ha->max_qpairs = ha->max_rsp_queues - 1;
1834                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
1835                    "Max no of queue pairs: %d.\n", ha->max_qpairs);
1836
1837                ql_log_pci(ql_log_info, ha->pdev, 0x001a,
1838                    "MSI-X vector count: %d.\n", ha->msix_count);
1839        } else
1840                ql_log_pci(ql_log_info, ha->pdev, 0x001b,
1841                    "BAR 3 not enabled.\n");
1842
1843mqiobase_exit:
1844        ha->msix_count = ha->max_rsp_queues + 1;
1845        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
1846            "MSIX Count:%d.\n", ha->msix_count);
1847        return 0;
1848
1849iospace_error_exit:
1850        return -ENOMEM;
1851}
1852
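/*
 * Once BAR 1 is ioremap()'d above, register access elsewhere in the driver
 * goes through the ha->iobase cookie and the RD_REG_*()/WRT_REG_*()
 * accessors, e.g. (sketch only, mirroring qla2x00_enable_intrs() above):
 *
 *	WRT_REG_WORD(&ha->iobase->isp.ictrl, ICR_EN_INT | ICR_EN_RISC);
 *	RD_REG_WORD(&ha->iobase->isp.ictrl);
 *
 * where the trailing read flushes the posted MMIO write.
 */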
1853
1854static int
1855qla83xx_iospace_config(struct qla_hw_data *ha)
1856{
1857        uint16_t msix;
1858
1859        if (pci_request_selected_regions(ha->pdev, ha->bars,
1860            QLA2XXX_DRIVER_NAME)) {
1861                ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
1862                    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
1863                    pci_name(ha->pdev));
1864
1865                goto iospace_error_exit;
1866        }
1867
1868        /* Use MMIO operations for all accesses. */
1869        if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1870                ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
1871                    "Region #0 not an MMIO resource (%s), aborting.\n",
1872                    pci_name(ha->pdev));
1873                goto iospace_error_exit;
1874        }
1875        if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1876                ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
1877                    "Invalid PCI mem region size (%s), aborting.\n",
1878                    pci_name(ha->pdev));
1879                goto iospace_error_exit;
1880        }
1881
1882        ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
1883        if (!ha->iobase) {
1884                ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
1885                    "Cannot remap MMIO (%s), aborting.\n",
1886                    pci_name(ha->pdev));
1887                goto iospace_error_exit;
1888        }
1889
1890        /* 64bit PCI BAR - BAR2 will correspond to region 4 */
1891        /* 83XX 26XX always use MQ type access for queues
1892         * - mbar 2, a.k.a. region 4 */
1893        ha->max_req_queues = ha->max_rsp_queues = 1;
1894        ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
1895                        pci_resource_len(ha->pdev, 4));
1896
1897        if (!ha->mqiobase) {
1898                ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
1899                    "BAR2/region4 not enabled\n");
1900                goto mqiobase_exit;
1901        }
1902
1903        ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
1904                        pci_resource_len(ha->pdev, 2));
1905        if (ha->msixbase) {
1906                /* Read MSIX vector size of the board */
1907                pci_read_config_word(ha->pdev,
1908                    QLA_83XX_PCI_MSIX_CONTROL, &msix);
1909                ha->msix_count = msix + 1;
1910                /*
1911                 * By default, driver uses at least two msix vectors
1912                 * (default & rspq)
1913                 */
1914                if (ql2xmqsupport) {
1915                        /* MB interrupt uses 1 vector */
1916                        ha->max_req_queues = ha->msix_count - 1;
1917                        ha->max_rsp_queues = ha->max_req_queues;
1918
1919                        /* ATIOQ needs 1 vector. That's 1 less QPair */
1920                        if (QLA_TGT_MODE_ENABLED())
1921                                ha->max_req_queues--;
1922
1923                        /* The number of queue pairs is the max value
1924                         * minus the base queue pair */
1925                        ha->max_qpairs = ha->max_req_queues - 1;
1926                        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
1927                            "Max no of queue pairs: %d.\n", ha->max_qpairs);
1928                }
1929                ql_log_pci(ql_log_info, ha->pdev, 0x011c,
1930                    "MSI-X vector count: %d.\n", ha->msix_count);
1931        } else
1932                ql_log_pci(ql_log_info, ha->pdev, 0x011e,
1933                    "BAR 1 not enabled.\n");
1934
1935mqiobase_exit:
1936        ha->msix_count = ha->max_rsp_queues + 1;
1937        if (QLA_TGT_MODE_ENABLED())
1938                ha->msix_count++;
1939
1940        qlt_83xx_iospace_config(ha);
1941
1942        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
1943            "MSIX Count:%d.\n", ha->msix_count);
1944        return 0;
1945
1946iospace_error_exit:
1947        return -ENOMEM;
1948}
1949
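/*
 * Per-ISP operation tables.  qla2x00_probe_one() points ha->isp_ops at the
 * table matching the detected controller family, so generation-specific
 * behaviour is reached through one indirection, e.g. (sketch only):
 *
 *	ha->isp_ops->enable_intrs(ha);
 *	rval = ha->isp_ops->abort_isp(base_vha);
 */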
1950static struct isp_operations qla2100_isp_ops = {
1951        .pci_config             = qla2100_pci_config,
1952        .reset_chip             = qla2x00_reset_chip,
1953        .chip_diag              = qla2x00_chip_diag,
1954        .config_rings           = qla2x00_config_rings,
1955        .reset_adapter          = qla2x00_reset_adapter,
1956        .nvram_config           = qla2x00_nvram_config,
1957        .update_fw_options      = qla2x00_update_fw_options,
1958        .load_risc              = qla2x00_load_risc,
1959        .pci_info_str           = qla2x00_pci_info_str,
1960        .fw_version_str         = qla2x00_fw_version_str,
1961        .intr_handler           = qla2100_intr_handler,
1962        .enable_intrs           = qla2x00_enable_intrs,
1963        .disable_intrs          = qla2x00_disable_intrs,
1964        .abort_command          = qla2x00_abort_command,
1965        .target_reset           = qla2x00_abort_target,
1966        .lun_reset              = qla2x00_lun_reset,
1967        .fabric_login           = qla2x00_login_fabric,
1968        .fabric_logout          = qla2x00_fabric_logout,
1969        .calc_req_entries       = qla2x00_calc_iocbs_32,
1970        .build_iocbs            = qla2x00_build_scsi_iocbs_32,
1971        .prep_ms_iocb           = qla2x00_prep_ms_iocb,
1972        .prep_ms_fdmi_iocb      = qla2x00_prep_ms_fdmi_iocb,
1973        .read_nvram             = qla2x00_read_nvram_data,
1974        .write_nvram            = qla2x00_write_nvram_data,
1975        .fw_dump                = qla2100_fw_dump,
1976        .beacon_on              = NULL,
1977        .beacon_off             = NULL,
1978        .beacon_blink           = NULL,
1979        .read_optrom            = qla2x00_read_optrom_data,
1980        .write_optrom           = qla2x00_write_optrom_data,
1981        .get_flash_version      = qla2x00_get_flash_version,
1982        .start_scsi             = qla2x00_start_scsi,
1983        .start_scsi_mq          = NULL,
1984        .abort_isp              = qla2x00_abort_isp,
1985        .iospace_config         = qla2x00_iospace_config,
1986        .initialize_adapter     = qla2x00_initialize_adapter,
1987};
1988
1989static struct isp_operations qla2300_isp_ops = {
1990        .pci_config             = qla2300_pci_config,
1991        .reset_chip             = qla2x00_reset_chip,
1992        .chip_diag              = qla2x00_chip_diag,
1993        .config_rings           = qla2x00_config_rings,
1994        .reset_adapter          = qla2x00_reset_adapter,
1995        .nvram_config           = qla2x00_nvram_config,
1996        .update_fw_options      = qla2x00_update_fw_options,
1997        .load_risc              = qla2x00_load_risc,
1998        .pci_info_str           = qla2x00_pci_info_str,
1999        .fw_version_str         = qla2x00_fw_version_str,
2000        .intr_handler           = qla2300_intr_handler,
2001        .enable_intrs           = qla2x00_enable_intrs,
2002        .disable_intrs          = qla2x00_disable_intrs,
2003        .abort_command          = qla2x00_abort_command,
2004        .target_reset           = qla2x00_abort_target,
2005        .lun_reset              = qla2x00_lun_reset,
2006        .fabric_login           = qla2x00_login_fabric,
2007        .fabric_logout          = qla2x00_fabric_logout,
2008        .calc_req_entries       = qla2x00_calc_iocbs_32,
2009        .build_iocbs            = qla2x00_build_scsi_iocbs_32,
2010        .prep_ms_iocb           = qla2x00_prep_ms_iocb,
2011        .prep_ms_fdmi_iocb      = qla2x00_prep_ms_fdmi_iocb,
2012        .read_nvram             = qla2x00_read_nvram_data,
2013        .write_nvram            = qla2x00_write_nvram_data,
2014        .fw_dump                = qla2300_fw_dump,
2015        .beacon_on              = qla2x00_beacon_on,
2016        .beacon_off             = qla2x00_beacon_off,
2017        .beacon_blink           = qla2x00_beacon_blink,
2018        .read_optrom            = qla2x00_read_optrom_data,
2019        .write_optrom           = qla2x00_write_optrom_data,
2020        .get_flash_version      = qla2x00_get_flash_version,
2021        .start_scsi             = qla2x00_start_scsi,
2022        .start_scsi_mq          = NULL,
2023        .abort_isp              = qla2x00_abort_isp,
2024        .iospace_config         = qla2x00_iospace_config,
2025        .initialize_adapter     = qla2x00_initialize_adapter,
2026};
2027
2028static struct isp_operations qla24xx_isp_ops = {
2029        .pci_config             = qla24xx_pci_config,
2030        .reset_chip             = qla24xx_reset_chip,
2031        .chip_diag              = qla24xx_chip_diag,
2032        .config_rings           = qla24xx_config_rings,
2033        .reset_adapter          = qla24xx_reset_adapter,
2034        .nvram_config           = qla24xx_nvram_config,
2035        .update_fw_options      = qla24xx_update_fw_options,
2036        .load_risc              = qla24xx_load_risc,
2037        .pci_info_str           = qla24xx_pci_info_str,
2038        .fw_version_str         = qla24xx_fw_version_str,
2039        .intr_handler           = qla24xx_intr_handler,
2040        .enable_intrs           = qla24xx_enable_intrs,
2041        .disable_intrs          = qla24xx_disable_intrs,
2042        .abort_command          = qla24xx_abort_command,
2043        .target_reset           = qla24xx_abort_target,
2044        .lun_reset              = qla24xx_lun_reset,
2045        .fabric_login           = qla24xx_login_fabric,
2046        .fabric_logout          = qla24xx_fabric_logout,
2047        .calc_req_entries       = NULL,
2048        .build_iocbs            = NULL,
2049        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
2050        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
2051        .read_nvram             = qla24xx_read_nvram_data,
2052        .write_nvram            = qla24xx_write_nvram_data,
2053        .fw_dump                = qla24xx_fw_dump,
2054        .beacon_on              = qla24xx_beacon_on,
2055        .beacon_off             = qla24xx_beacon_off,
2056        .beacon_blink           = qla24xx_beacon_blink,
2057        .read_optrom            = qla24xx_read_optrom_data,
2058        .write_optrom           = qla24xx_write_optrom_data,
2059        .get_flash_version      = qla24xx_get_flash_version,
2060        .start_scsi             = qla24xx_start_scsi,
2061        .start_scsi_mq          = NULL,
2062        .abort_isp              = qla2x00_abort_isp,
2063        .iospace_config         = qla2x00_iospace_config,
2064        .initialize_adapter     = qla2x00_initialize_adapter,
2065};
2066
2067static struct isp_operations qla25xx_isp_ops = {
2068        .pci_config             = qla25xx_pci_config,
2069        .reset_chip             = qla24xx_reset_chip,
2070        .chip_diag              = qla24xx_chip_diag,
2071        .config_rings           = qla24xx_config_rings,
2072        .reset_adapter          = qla24xx_reset_adapter,
2073        .nvram_config           = qla24xx_nvram_config,
2074        .update_fw_options      = qla24xx_update_fw_options,
2075        .load_risc              = qla24xx_load_risc,
2076        .pci_info_str           = qla24xx_pci_info_str,
2077        .fw_version_str         = qla24xx_fw_version_str,
2078        .intr_handler           = qla24xx_intr_handler,
2079        .enable_intrs           = qla24xx_enable_intrs,
2080        .disable_intrs          = qla24xx_disable_intrs,
2081        .abort_command          = qla24xx_abort_command,
2082        .target_reset           = qla24xx_abort_target,
2083        .lun_reset              = qla24xx_lun_reset,
2084        .fabric_login           = qla24xx_login_fabric,
2085        .fabric_logout          = qla24xx_fabric_logout,
2086        .calc_req_entries       = NULL,
2087        .build_iocbs            = NULL,
2088        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
2089        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
2090        .read_nvram             = qla25xx_read_nvram_data,
2091        .write_nvram            = qla25xx_write_nvram_data,
2092        .fw_dump                = qla25xx_fw_dump,
2093        .beacon_on              = qla24xx_beacon_on,
2094        .beacon_off             = qla24xx_beacon_off,
2095        .beacon_blink           = qla24xx_beacon_blink,
2096        .read_optrom            = qla25xx_read_optrom_data,
2097        .write_optrom           = qla24xx_write_optrom_data,
2098        .get_flash_version      = qla24xx_get_flash_version,
2099        .start_scsi             = qla24xx_dif_start_scsi,
2100        .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2101        .abort_isp              = qla2x00_abort_isp,
2102        .iospace_config         = qla2x00_iospace_config,
2103        .initialize_adapter     = qla2x00_initialize_adapter,
2104};
2105
2106static struct isp_operations qla81xx_isp_ops = {
2107        .pci_config             = qla25xx_pci_config,
2108        .reset_chip             = qla24xx_reset_chip,
2109        .chip_diag              = qla24xx_chip_diag,
2110        .config_rings           = qla24xx_config_rings,
2111        .reset_adapter          = qla24xx_reset_adapter,
2112        .nvram_config           = qla81xx_nvram_config,
2113        .update_fw_options      = qla81xx_update_fw_options,
2114        .load_risc              = qla81xx_load_risc,
2115        .pci_info_str           = qla24xx_pci_info_str,
2116        .fw_version_str         = qla24xx_fw_version_str,
2117        .intr_handler           = qla24xx_intr_handler,
2118        .enable_intrs           = qla24xx_enable_intrs,
2119        .disable_intrs          = qla24xx_disable_intrs,
2120        .abort_command          = qla24xx_abort_command,
2121        .target_reset           = qla24xx_abort_target,
2122        .lun_reset              = qla24xx_lun_reset,
2123        .fabric_login           = qla24xx_login_fabric,
2124        .fabric_logout          = qla24xx_fabric_logout,
2125        .calc_req_entries       = NULL,
2126        .build_iocbs            = NULL,
2127        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
2128        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
2129        .read_nvram             = NULL,
2130        .write_nvram            = NULL,
2131        .fw_dump                = qla81xx_fw_dump,
2132        .beacon_on              = qla24xx_beacon_on,
2133        .beacon_off             = qla24xx_beacon_off,
2134        .beacon_blink           = qla83xx_beacon_blink,
2135        .read_optrom            = qla25xx_read_optrom_data,
2136        .write_optrom           = qla24xx_write_optrom_data,
2137        .get_flash_version      = qla24xx_get_flash_version,
2138        .start_scsi             = qla24xx_dif_start_scsi,
2139        .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2140        .abort_isp              = qla2x00_abort_isp,
2141        .iospace_config         = qla2x00_iospace_config,
2142        .initialize_adapter     = qla2x00_initialize_adapter,
2143};
2144
2145static struct isp_operations qla82xx_isp_ops = {
2146        .pci_config             = qla82xx_pci_config,
2147        .reset_chip             = qla82xx_reset_chip,
2148        .chip_diag              = qla24xx_chip_diag,
2149        .config_rings           = qla82xx_config_rings,
2150        .reset_adapter          = qla24xx_reset_adapter,
2151        .nvram_config           = qla81xx_nvram_config,
2152        .update_fw_options      = qla24xx_update_fw_options,
2153        .load_risc              = qla82xx_load_risc,
2154        .pci_info_str           = qla24xx_pci_info_str,
2155        .fw_version_str         = qla24xx_fw_version_str,
2156        .intr_handler           = qla82xx_intr_handler,
2157        .enable_intrs           = qla82xx_enable_intrs,
2158        .disable_intrs          = qla82xx_disable_intrs,
2159        .abort_command          = qla24xx_abort_command,
2160        .target_reset           = qla24xx_abort_target,
2161        .lun_reset              = qla24xx_lun_reset,
2162        .fabric_login           = qla24xx_login_fabric,
2163        .fabric_logout          = qla24xx_fabric_logout,
2164        .calc_req_entries       = NULL,
2165        .build_iocbs            = NULL,
2166        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
2167        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
2168        .read_nvram             = qla24xx_read_nvram_data,
2169        .write_nvram            = qla24xx_write_nvram_data,
2170        .fw_dump                = qla82xx_fw_dump,
2171        .beacon_on              = qla82xx_beacon_on,
2172        .beacon_off             = qla82xx_beacon_off,
2173        .beacon_blink           = NULL,
2174        .read_optrom            = qla82xx_read_optrom_data,
2175        .write_optrom           = qla82xx_write_optrom_data,
2176        .get_flash_version      = qla82xx_get_flash_version,
2177        .start_scsi             = qla82xx_start_scsi,
2178        .start_scsi_mq          = NULL,
2179        .abort_isp              = qla82xx_abort_isp,
2180        .iospace_config         = qla82xx_iospace_config,
2181        .initialize_adapter     = qla2x00_initialize_adapter,
2182};
2183
2184static struct isp_operations qla8044_isp_ops = {
2185        .pci_config             = qla82xx_pci_config,
2186        .reset_chip             = qla82xx_reset_chip,
2187        .chip_diag              = qla24xx_chip_diag,
2188        .config_rings           = qla82xx_config_rings,
2189        .reset_adapter          = qla24xx_reset_adapter,
2190        .nvram_config           = qla81xx_nvram_config,
2191        .update_fw_options      = qla24xx_update_fw_options,
2192        .load_risc              = qla82xx_load_risc,
2193        .pci_info_str           = qla24xx_pci_info_str,
2194        .fw_version_str         = qla24xx_fw_version_str,
2195        .intr_handler           = qla8044_intr_handler,
2196        .enable_intrs           = qla82xx_enable_intrs,
2197        .disable_intrs          = qla82xx_disable_intrs,
2198        .abort_command          = qla24xx_abort_command,
2199        .target_reset           = qla24xx_abort_target,
2200        .lun_reset              = qla24xx_lun_reset,
2201        .fabric_login           = qla24xx_login_fabric,
2202        .fabric_logout          = qla24xx_fabric_logout,
2203        .calc_req_entries       = NULL,
2204        .build_iocbs            = NULL,
2205        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
2206        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
2207        .read_nvram             = NULL,
2208        .write_nvram            = NULL,
2209        .fw_dump                = qla8044_fw_dump,
2210        .beacon_on              = qla82xx_beacon_on,
2211        .beacon_off             = qla82xx_beacon_off,
2212        .beacon_blink           = NULL,
2213        .read_optrom            = qla8044_read_optrom_data,
2214        .write_optrom           = qla8044_write_optrom_data,
2215        .get_flash_version      = qla82xx_get_flash_version,
2216        .start_scsi             = qla82xx_start_scsi,
2217        .start_scsi_mq          = NULL,
2218        .abort_isp              = qla8044_abort_isp,
2219        .iospace_config         = qla82xx_iospace_config,
2220        .initialize_adapter     = qla2x00_initialize_adapter,
2221};
2222
2223static struct isp_operations qla83xx_isp_ops = {
2224        .pci_config             = qla25xx_pci_config,
2225        .reset_chip             = qla24xx_reset_chip,
2226        .chip_diag              = qla24xx_chip_diag,
2227        .config_rings           = qla24xx_config_rings,
2228        .reset_adapter          = qla24xx_reset_adapter,
2229        .nvram_config           = qla81xx_nvram_config,
2230        .update_fw_options      = qla81xx_update_fw_options,
2231        .load_risc              = qla81xx_load_risc,
2232        .pci_info_str           = qla24xx_pci_info_str,
2233        .fw_version_str         = qla24xx_fw_version_str,
2234        .intr_handler           = qla24xx_intr_handler,
2235        .enable_intrs           = qla24xx_enable_intrs,
2236        .disable_intrs          = qla24xx_disable_intrs,
2237        .abort_command          = qla24xx_abort_command,
2238        .target_reset           = qla24xx_abort_target,
2239        .lun_reset              = qla24xx_lun_reset,
2240        .fabric_login           = qla24xx_login_fabric,
2241        .fabric_logout          = qla24xx_fabric_logout,
2242        .calc_req_entries       = NULL,
2243        .build_iocbs            = NULL,
2244        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
2245        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
2246        .read_nvram             = NULL,
2247        .write_nvram            = NULL,
2248        .fw_dump                = qla83xx_fw_dump,
2249        .beacon_on              = qla24xx_beacon_on,
2250        .beacon_off             = qla24xx_beacon_off,
2251        .beacon_blink           = qla83xx_beacon_blink,
2252        .read_optrom            = qla25xx_read_optrom_data,
2253        .write_optrom           = qla24xx_write_optrom_data,
2254        .get_flash_version      = qla24xx_get_flash_version,
2255        .start_scsi             = qla24xx_dif_start_scsi,
2256        .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2257        .abort_isp              = qla2x00_abort_isp,
2258        .iospace_config         = qla83xx_iospace_config,
2259        .initialize_adapter     = qla2x00_initialize_adapter,
2260};
2261
2262static struct isp_operations qlafx00_isp_ops = {
2263        .pci_config             = qlafx00_pci_config,
2264        .reset_chip             = qlafx00_soft_reset,
2265        .chip_diag              = qlafx00_chip_diag,
2266        .config_rings           = qlafx00_config_rings,
2267        .reset_adapter          = qlafx00_soft_reset,
2268        .nvram_config           = NULL,
2269        .update_fw_options      = NULL,
2270        .load_risc              = NULL,
2271        .pci_info_str           = qlafx00_pci_info_str,
2272        .fw_version_str         = qlafx00_fw_version_str,
2273        .intr_handler           = qlafx00_intr_handler,
2274        .enable_intrs           = qlafx00_enable_intrs,
2275        .disable_intrs          = qlafx00_disable_intrs,
2276        .abort_command          = qla24xx_async_abort_command,
2277        .target_reset           = qlafx00_abort_target,
2278        .lun_reset              = qlafx00_lun_reset,
2279        .fabric_login           = NULL,
2280        .fabric_logout          = NULL,
2281        .calc_req_entries       = NULL,
2282        .build_iocbs            = NULL,
2283        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
2284        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
2285        .read_nvram             = qla24xx_read_nvram_data,
2286        .write_nvram            = qla24xx_write_nvram_data,
2287        .fw_dump                = NULL,
2288        .beacon_on              = qla24xx_beacon_on,
2289        .beacon_off             = qla24xx_beacon_off,
2290        .beacon_blink           = NULL,
2291        .read_optrom            = qla24xx_read_optrom_data,
2292        .write_optrom           = qla24xx_write_optrom_data,
2293        .get_flash_version      = qla24xx_get_flash_version,
2294        .start_scsi             = qlafx00_start_scsi,
2295        .start_scsi_mq          = NULL,
2296        .abort_isp              = qlafx00_abort_isp,
2297        .iospace_config         = qlafx00_iospace_config,
2298        .initialize_adapter     = qlafx00_initialize_adapter,
2299};
2300
2301static struct isp_operations qla27xx_isp_ops = {
2302        .pci_config             = qla25xx_pci_config,
2303        .reset_chip             = qla24xx_reset_chip,
2304        .chip_diag              = qla24xx_chip_diag,
2305        .config_rings           = qla24xx_config_rings,
2306        .reset_adapter          = qla24xx_reset_adapter,
2307        .nvram_config           = qla81xx_nvram_config,
2308        .update_fw_options      = qla81xx_update_fw_options,
2309        .load_risc              = qla81xx_load_risc,
2310        .pci_info_str           = qla24xx_pci_info_str,
2311        .fw_version_str         = qla24xx_fw_version_str,
2312        .intr_handler           = qla24xx_intr_handler,
2313        .enable_intrs           = qla24xx_enable_intrs,
2314        .disable_intrs          = qla24xx_disable_intrs,
2315        .abort_command          = qla24xx_abort_command,
2316        .target_reset           = qla24xx_abort_target,
2317        .lun_reset              = qla24xx_lun_reset,
2318        .fabric_login           = qla24xx_login_fabric,
2319        .fabric_logout          = qla24xx_fabric_logout,
2320        .calc_req_entries       = NULL,
2321        .build_iocbs            = NULL,
2322        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
2323        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
2324        .read_nvram             = NULL,
2325        .write_nvram            = NULL,
2326        .fw_dump                = qla27xx_fwdump,
2327        .beacon_on              = qla24xx_beacon_on,
2328        .beacon_off             = qla24xx_beacon_off,
2329        .beacon_blink           = qla83xx_beacon_blink,
2330        .read_optrom            = qla25xx_read_optrom_data,
2331        .write_optrom           = qla24xx_write_optrom_data,
2332        .get_flash_version      = qla24xx_get_flash_version,
2333        .start_scsi             = qla24xx_dif_start_scsi,
2334        .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2335        .abort_isp              = qla2x00_abort_isp,
2336        .iospace_config         = qla83xx_iospace_config,
2337        .initialize_adapter     = qla2x00_initialize_adapter,
2338};
2339
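/*
 * Decode the PCI device ID into DT_* isp_type/device_type bits and the
 * firmware load address.  The IS_QLAxxxx()/IS_FWI2_CAPABLE()-style
 * convenience macros used throughout the driver test these bits, so this
 * runs early in qla2x00_probe_one(), before any of them are consulted.
 */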
2340static inline void
2341qla2x00_set_isp_flags(struct qla_hw_data *ha)
2342{
2343        ha->device_type = DT_EXTENDED_IDS;
2344        switch (ha->pdev->device) {
2345        case PCI_DEVICE_ID_QLOGIC_ISP2100:
2346                ha->isp_type |= DT_ISP2100;
2347                ha->device_type &= ~DT_EXTENDED_IDS;
2348                ha->fw_srisc_address = RISC_START_ADDRESS_2100;
2349                break;
2350        case PCI_DEVICE_ID_QLOGIC_ISP2200:
2351                ha->isp_type |= DT_ISP2200;
2352                ha->device_type &= ~DT_EXTENDED_IDS;
2353                ha->fw_srisc_address = RISC_START_ADDRESS_2100;
2354                break;
2355        case PCI_DEVICE_ID_QLOGIC_ISP2300:
2356                ha->isp_type |= DT_ISP2300;
2357                ha->device_type |= DT_ZIO_SUPPORTED;
2358                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2359                break;
2360        case PCI_DEVICE_ID_QLOGIC_ISP2312:
2361                ha->isp_type |= DT_ISP2312;
2362                ha->device_type |= DT_ZIO_SUPPORTED;
2363                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2364                break;
2365        case PCI_DEVICE_ID_QLOGIC_ISP2322:
2366                ha->isp_type |= DT_ISP2322;
2367                ha->device_type |= DT_ZIO_SUPPORTED;
2368                if (ha->pdev->subsystem_vendor == 0x1028 &&
2369                    ha->pdev->subsystem_device == 0x0170)
2370                        ha->device_type |= DT_OEM_001;
2371                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2372                break;
2373        case PCI_DEVICE_ID_QLOGIC_ISP6312:
2374                ha->isp_type |= DT_ISP6312;
2375                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2376                break;
2377        case PCI_DEVICE_ID_QLOGIC_ISP6322:
2378                ha->isp_type |= DT_ISP6322;
2379                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2380                break;
2381        case PCI_DEVICE_ID_QLOGIC_ISP2422:
2382                ha->isp_type |= DT_ISP2422;
2383                ha->device_type |= DT_ZIO_SUPPORTED;
2384                ha->device_type |= DT_FWI2;
2385                ha->device_type |= DT_IIDMA;
2386                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2387                break;
2388        case PCI_DEVICE_ID_QLOGIC_ISP2432:
2389                ha->isp_type |= DT_ISP2432;
2390                ha->device_type |= DT_ZIO_SUPPORTED;
2391                ha->device_type |= DT_FWI2;
2392                ha->device_type |= DT_IIDMA;
2393                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2394                break;
2395        case PCI_DEVICE_ID_QLOGIC_ISP8432:
2396                ha->isp_type |= DT_ISP8432;
2397                ha->device_type |= DT_ZIO_SUPPORTED;
2398                ha->device_type |= DT_FWI2;
2399                ha->device_type |= DT_IIDMA;
2400                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2401                break;
2402        case PCI_DEVICE_ID_QLOGIC_ISP5422:
2403                ha->isp_type |= DT_ISP5422;
2404                ha->device_type |= DT_FWI2;
2405                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2406                break;
2407        case PCI_DEVICE_ID_QLOGIC_ISP5432:
2408                ha->isp_type |= DT_ISP5432;
2409                ha->device_type |= DT_FWI2;
2410                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2411                break;
2412        case PCI_DEVICE_ID_QLOGIC_ISP2532:
2413                ha->isp_type |= DT_ISP2532;
2414                ha->device_type |= DT_ZIO_SUPPORTED;
2415                ha->device_type |= DT_FWI2;
2416                ha->device_type |= DT_IIDMA;
2417                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2418                break;
2419        case PCI_DEVICE_ID_QLOGIC_ISP8001:
2420                ha->isp_type |= DT_ISP8001;
2421                ha->device_type |= DT_ZIO_SUPPORTED;
2422                ha->device_type |= DT_FWI2;
2423                ha->device_type |= DT_IIDMA;
2424                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2425                break;
2426        case PCI_DEVICE_ID_QLOGIC_ISP8021:
2427                ha->isp_type |= DT_ISP8021;
2428                ha->device_type |= DT_ZIO_SUPPORTED;
2429                ha->device_type |= DT_FWI2;
2430                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2431                /* Initialize 82XX ISP flags */
2432                qla82xx_init_flags(ha);
2433                break;
2434         case PCI_DEVICE_ID_QLOGIC_ISP8044:
2435                ha->isp_type |= DT_ISP8044;
2436                ha->device_type |= DT_ZIO_SUPPORTED;
2437                ha->device_type |= DT_FWI2;
2438                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2439                /* Initialize 82XX ISP flags */
2440                qla82xx_init_flags(ha);
2441                break;
2442        case PCI_DEVICE_ID_QLOGIC_ISP2031:
2443                ha->isp_type |= DT_ISP2031;
2444                ha->device_type |= DT_ZIO_SUPPORTED;
2445                ha->device_type |= DT_FWI2;
2446                ha->device_type |= DT_IIDMA;
2447                ha->device_type |= DT_T10_PI;
2448                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2449                break;
2450        case PCI_DEVICE_ID_QLOGIC_ISP8031:
2451                ha->isp_type |= DT_ISP8031;
2452                ha->device_type |= DT_ZIO_SUPPORTED;
2453                ha->device_type |= DT_FWI2;
2454                ha->device_type |= DT_IIDMA;
2455                ha->device_type |= DT_T10_PI;
2456                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2457                break;
2458        case PCI_DEVICE_ID_QLOGIC_ISPF001:
2459                ha->isp_type |= DT_ISPFX00;
2460                break;
2461        case PCI_DEVICE_ID_QLOGIC_ISP2071:
2462                ha->isp_type |= DT_ISP2071;
2463                ha->device_type |= DT_ZIO_SUPPORTED;
2464                ha->device_type |= DT_FWI2;
2465                ha->device_type |= DT_IIDMA;
2466                ha->device_type |= DT_T10_PI;
2467                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2468                break;
2469        case PCI_DEVICE_ID_QLOGIC_ISP2271:
2470                ha->isp_type |= DT_ISP2271;
2471                ha->device_type |= DT_ZIO_SUPPORTED;
2472                ha->device_type |= DT_FWI2;
2473                ha->device_type |= DT_IIDMA;
2474                ha->device_type |= DT_T10_PI;
2475                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2476                break;
2477        case PCI_DEVICE_ID_QLOGIC_ISP2261:
2478                ha->isp_type |= DT_ISP2261;
2479                ha->device_type |= DT_ZIO_SUPPORTED;
2480                ha->device_type |= DT_FWI2;
2481                ha->device_type |= DT_IIDMA;
2482                ha->device_type |= DT_T10_PI;
2483                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2484                break;
2485        }
2486
2487        if (IS_QLA82XX(ha))
2488                ha->port_no = ha->portnum & 1;
2489        else {
2490                /* Get adapter physical port no from interrupt pin register. */
2491                pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
2492                if (IS_QLA27XX(ha))
2493                        ha->port_no--;
2494                else
2495                        ha->port_no = !(ha->port_no & 1);
2496        }
2497
2498        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
2499            "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
2500            ha->device_type, ha->port_no, ha->fw_srisc_address);
2501}
2502
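/*
 * Asynchronous scan hooks: the midlayer calls scan_start() once when the
 * host is registered and then polls scan_finished() until it returns
 * nonzero.  Both are expected to be wired to the scsi_host_template's
 * .scan_start/.scan_finished members.  scan_finished() bounds the wait by
 * the adapter's loop_reset_delay so a dead loop cannot stall the scan.
 */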
2503static void
2504qla2xxx_scan_start(struct Scsi_Host *shost)
2505{
2506        scsi_qla_host_t *vha = shost_priv(shost);
2507
2508        if (vha->hw->flags.running_gold_fw)
2509                return;
2510
2511        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2512        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2513        set_bit(RSCN_UPDATE, &vha->dpc_flags);
2514        set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
2515}
2516
2517static int
2518qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
2519{
2520        scsi_qla_host_t *vha = shost_priv(shost);
2521
2522        if (test_bit(UNLOADING, &vha->dpc_flags))
2523                return 1;
2524        if (!vha->host)
2525                return 1;
2526        if (time > vha->hw->loop_reset_delay * HZ)
2527                return 1;
2528
2529        return atomic_read(&vha->loop_state) == LOOP_READY;
2530}
2531
2532/*
2533 * PCI driver interface
2534 */
2535static int
2536qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2537{
2538        int     ret = -ENODEV;
2539        struct Scsi_Host *host;
2540        scsi_qla_host_t *base_vha = NULL;
2541        struct qla_hw_data *ha;
2542        char pci_info[30];
2543        char fw_str[30], wq_name[30];
2544        struct scsi_host_template *sht;
2545        int bars, mem_only = 0;
2546        uint16_t req_length = 0, rsp_length = 0;
2547        struct req_que *req = NULL;
2548        struct rsp_que *rsp = NULL;
2549        int i;
2550
2551        bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
2552        sht = &qla2xxx_driver_template;
2553        if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
2554            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
2555            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
2556            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
2557            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
2558            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
2559            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
2560            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
2561            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
2562            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
2563            pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
2564            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
2565            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
2566            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
2567            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
2568                bars = pci_select_bars(pdev, IORESOURCE_MEM);
2569                mem_only = 1;
2570                ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
2571                    "Mem only adapter.\n");
2572        }
2573        ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
2574            "Bars=%d.\n", bars);
2575
2576        if (mem_only) {
2577                if (pci_enable_device_mem(pdev))
2578                        goto probe_out;
2579        } else {
2580                if (pci_enable_device(pdev))
2581                        goto probe_out;
2582        }
2583
2584        /* This may fail but that's ok */
2585        pci_enable_pcie_error_reporting(pdev);
2586
2587        ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
2588        if (!ha) {
2589                ql_log_pci(ql_log_fatal, pdev, 0x0009,
2590                    "Unable to allocate memory for ha.\n");
2591                goto probe_out;
2592        }
2593        ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2594            "Memory allocated for ha=%p.\n", ha);
2595        ha->pdev = pdev;
2596        ha->tgt.enable_class_2 = ql2xenableclass2;
2597        INIT_LIST_HEAD(&ha->tgt.q_full_list);
2598        spin_lock_init(&ha->tgt.q_full_lock);
2599        spin_lock_init(&ha->tgt.sess_lock);
2600        spin_lock_init(&ha->tgt.atio_lock);
2601
2602
2603        /* Clear our data area */
2604        ha->bars = bars;
2605        ha->mem_only = mem_only;
2606        spin_lock_init(&ha->hardware_lock);
2607        spin_lock_init(&ha->vport_slock);
2608        mutex_init(&ha->selflogin_lock);
2609        mutex_init(&ha->optrom_mutex);
2610
2611        /* Set ISP-type information. */
2612        qla2x00_set_isp_flags(ha);
2613
2614        /* Set EEH reset type to fundamental if required by hba */
2615        if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
2616            IS_QLA83XX(ha) || IS_QLA27XX(ha))
2617                pdev->needs_freset = 1;
2618
2619        ha->prev_topology = 0;
2620        ha->init_cb_size = sizeof(init_cb_t);
2621        ha->link_data_rate = PORT_SPEED_UNKNOWN;
2622        ha->optrom_size = OPTROM_SIZE_2300;
2623
2624        /* Assign ISP specific operations. */
2625        if (IS_QLA2100(ha)) {
2626                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
2627                ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
2628                req_length = REQUEST_ENTRY_CNT_2100;
2629                rsp_length = RESPONSE_ENTRY_CNT_2100;
2630                ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
2631                ha->gid_list_info_size = 4;
2632                ha->flash_conf_off = ~0;
2633                ha->flash_data_off = ~0;
2634                ha->nvram_conf_off = ~0;
2635                ha->nvram_data_off = ~0;
2636                ha->isp_ops = &qla2100_isp_ops;
2637        } else if (IS_QLA2200(ha)) {
2638                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
2639                ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
2640                req_length = REQUEST_ENTRY_CNT_2200;
2641                rsp_length = RESPONSE_ENTRY_CNT_2100;
2642                ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
2643                ha->gid_list_info_size = 4;
2644                ha->flash_conf_off = ~0;
2645                ha->flash_data_off = ~0;
2646                ha->nvram_conf_off = ~0;
2647                ha->nvram_data_off = ~0;
2648                ha->isp_ops = &qla2100_isp_ops;
2649        } else if (IS_QLA23XX(ha)) {
2650                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
2651                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2652                req_length = REQUEST_ENTRY_CNT_2200;
2653                rsp_length = RESPONSE_ENTRY_CNT_2300;
2654                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2655                ha->gid_list_info_size = 6;
2656                if (IS_QLA2322(ha) || IS_QLA6322(ha))
2657                        ha->optrom_size = OPTROM_SIZE_2322;
2658                ha->flash_conf_off = ~0;
2659                ha->flash_data_off = ~0;
2660                ha->nvram_conf_off = ~0;
2661                ha->nvram_data_off = ~0;
2662                ha->isp_ops = &qla2300_isp_ops;
2663        } else if (IS_QLA24XX_TYPE(ha)) {
2664                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2665                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2666                req_length = REQUEST_ENTRY_CNT_24XX;
2667                rsp_length = RESPONSE_ENTRY_CNT_2300;
2668                ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2669                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2670                ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2671                ha->gid_list_info_size = 8;
2672                ha->optrom_size = OPTROM_SIZE_24XX;
2673                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
2674                ha->isp_ops = &qla24xx_isp_ops;
2675                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2676                ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2677                ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2678                ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2679        } else if (IS_QLA25XX(ha)) {
2680                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2681                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2682                req_length = REQUEST_ENTRY_CNT_24XX;
2683                rsp_length = RESPONSE_ENTRY_CNT_2300;
2684                ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2685                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2686                ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2687                ha->gid_list_info_size = 8;
2688                ha->optrom_size = OPTROM_SIZE_25XX;
2689                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2690                ha->isp_ops = &qla25xx_isp_ops;
2691                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2692                ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2693                ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2694                ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2695        } else if (IS_QLA81XX(ha)) {
2696                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2697                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2698                req_length = REQUEST_ENTRY_CNT_24XX;
2699                rsp_length = RESPONSE_ENTRY_CNT_2300;
2700                ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2701                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2702                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2703                ha->gid_list_info_size = 8;
2704                ha->optrom_size = OPTROM_SIZE_81XX;
2705                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2706                ha->isp_ops = &qla81xx_isp_ops;
2707                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2708                ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2709                ha->nvram_conf_off = ~0;
2710                ha->nvram_data_off = ~0;
2711        } else if (IS_QLA82XX(ha)) {
2712                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2713                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2714                req_length = REQUEST_ENTRY_CNT_82XX;
2715                rsp_length = RESPONSE_ENTRY_CNT_82XX;
2716                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2717                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2718                ha->gid_list_info_size = 8;
2719                ha->optrom_size = OPTROM_SIZE_82XX;
2720                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2721                ha->isp_ops = &qla82xx_isp_ops;
2722                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2723                ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2724                ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2725                ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2726        } else if (IS_QLA8044(ha)) {
2727                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2728                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2729                req_length = REQUEST_ENTRY_CNT_82XX;
2730                rsp_length = RESPONSE_ENTRY_CNT_82XX;
2731                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2732                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2733                ha->gid_list_info_size = 8;
2734                ha->optrom_size = OPTROM_SIZE_83XX;
2735                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2736                ha->isp_ops = &qla8044_isp_ops;
2737                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2738                ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2739                ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2740                ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2741        } else if (IS_QLA83XX(ha)) {
2742                ha->portnum = PCI_FUNC(ha->pdev->devfn);
2743                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2744                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2745                req_length = REQUEST_ENTRY_CNT_83XX;
2746                rsp_length = RESPONSE_ENTRY_CNT_83XX;
2747                ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2748                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2749                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2750                ha->gid_list_info_size = 8;
2751                ha->optrom_size = OPTROM_SIZE_83XX;
2752                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2753                ha->isp_ops = &qla83xx_isp_ops;
2754                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2755                ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2756                ha->nvram_conf_off = ~0;
2757                ha->nvram_data_off = ~0;
2758        }  else if (IS_QLAFX00(ha)) {
2759                ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
2760                ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
2761                ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
2762                req_length = REQUEST_ENTRY_CNT_FX00;
2763                rsp_length = RESPONSE_ENTRY_CNT_FX00;
2764                ha->isp_ops = &qlafx00_isp_ops;
2765                ha->port_down_retry_count = 30; /* default value */
2766                ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
2767                ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
2768                ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
2769                ha->mr.fw_hbt_en = 1;
2770                ha->mr.host_info_resend = false;
2771                ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
2772        } else if (IS_QLA27XX(ha)) {
2773                ha->portnum = PCI_FUNC(ha->pdev->devfn);
2774                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2775                ha->mbx_count = MAILBOX_REGISTER_COUNT;
2776                req_length = REQUEST_ENTRY_CNT_83XX;
2777                rsp_length = RESPONSE_ENTRY_CNT_83XX;
2778                ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2779                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2780                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2781                ha->gid_list_info_size = 8;
2782                ha->optrom_size = OPTROM_SIZE_83XX;
2783                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2784                ha->isp_ops = &qla27xx_isp_ops;
2785                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2786                ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2787                ha->nvram_conf_off = ~0;
2788                ha->nvram_data_off = ~0;
2789        }
2790
2791        ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
2792            "mbx_count=%d, req_length=%d, "
2793            "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
2794            "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
2795            "max_fibre_devices=%d.\n",
2796            ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
2797            ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
2798            ha->nvram_npiv_size, ha->max_fibre_devices);
2799        ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
2800            "isp_ops=%p, flash_conf_off=%d, "
2801            "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
2802            ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
2803            ha->nvram_conf_off, ha->nvram_data_off);
2804
2805        /* Configure PCI I/O space */
2806        ret = ha->isp_ops->iospace_config(ha);
2807        if (ret)
2808                goto iospace_config_failed;
2809
2810        ql_log_pci(ql_log_info, pdev, 0x001d,
2811            "Found an ISP%04X irq %d iobase 0x%p.\n",
2812            pdev->device, pdev->irq, ha->iobase);
2813        mutex_init(&ha->vport_lock);
2814        mutex_init(&ha->mq_lock);
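    /*
     * mbx_cmd_comp effectively acts as a binary semaphore guarding mailbox
     * register access; it is completed right after initialization so the
     * first mailbox command does not block.
     */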
2815        init_completion(&ha->mbx_cmd_comp);
2816        complete(&ha->mbx_cmd_comp);
2817        init_completion(&ha->mbx_intr_comp);
2818        init_completion(&ha->dcbx_comp);
2819        init_completion(&ha->lb_portup_comp);
2820
2821        set_bit(0, (unsigned long *) ha->vp_idx_map);
2822
2823        qla2x00_config_dma_addressing(ha);
2824        ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
2825            "64 Bit addressing is %s.\n",
2826            ha->flags.enable_64bit_addressing ? "enabled" :
2827            "disabled");
2828        ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2829        if (ret) {
2830                ql_log_pci(ql_log_fatal, pdev, 0x0031,
2831                    "Failed to allocate memory for adapter, aborting.\n");
2832
2833                goto probe_hw_failed;
2834        }
2835
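    /* Default queue depth; honour ql2xmaxqdepth only when it fits in 16 bits. */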
2836        req->max_q_depth = MAX_Q_DEPTH;
2837        if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
2838                req->max_q_depth = ql2xmaxqdepth;
2839
2840
2841        base_vha = qla2x00_create_host(sht, ha);
2842        if (!base_vha) {
2843                ret = -ENOMEM;
2844                qla2x00_mem_free(ha);
2845                qla2x00_free_req_que(ha, req);
2846                qla2x00_free_rsp_que(ha, rsp);
2847                goto probe_hw_failed;
2848        }
2849
2850        pci_set_drvdata(pdev, base_vha);
2851        set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
2852
2853        host = base_vha->host;
2854        base_vha->req = req;
2855        if (IS_QLA2XXX_MIDTYPE(ha))
2856                base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
2857        else
2858                base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
2859                                                base_vha->vp_idx;
2860
2861        /* Setup fcport template structure. */
2862        ha->mr.fcport.vha = base_vha;
2863        ha->mr.fcport.port_type = FCT_UNKNOWN;
2864        ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
2865        qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
2866        ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
2867        ha->mr.fcport.scan_state = 1;
2868
2869        /* Set the SG table size based on ISP type */
2870        if (!IS_FWI2_CAPABLE(ha)) {
2871                if (IS_QLA2100(ha))
2872                        host->sg_tablesize = 32;
2873        } else {
2874                if (!IS_QLA82XX(ha))
2875                        host->sg_tablesize = QLA_SG_ALL;
2876        }
2877        host->max_id = ha->max_fibre_devices;
2878        host->cmd_per_lun = 3;
2879        host->unique_id = host->host_no;
2880        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
2881                host->max_cmd_len = 32;
2882        else
2883                host->max_cmd_len = MAX_CMDSZ;
2884        host->max_channel = MAX_BUSES - 1;
2885        /* Older HBAs support only 16-bit LUNs */
2886        if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
2887            ql2xmaxlun > 0xffff)
2888                host->max_lun = 0xffff;
2889        else
2890                host->max_lun = ql2xmaxlun;
2891        host->transportt = qla2xxx_transport_template;
2892        sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2893
2894        ql_dbg(ql_dbg_init, base_vha, 0x0033,
2895            "max_id=%d this_id=%d "
2896            "cmd_per_lun=%d unique_id=%d max_cmd_len=%d max_channel=%d "
2897            "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
2898            host->this_id, host->cmd_per_lun, host->unique_id,
2899            host->max_cmd_len, host->max_channel, host->max_lun,
2900            host->transportt, sht->vendor_id);
2901
2902        /* Set up the irqs */
2903        ret = qla2x00_request_irqs(ha, rsp);
2904        if (ret)
2905                goto probe_init_failed;
2906
2907        /* Alloc arrays of request and response ring ptrs */
2908        if (!qla2x00_alloc_queues(ha, req, rsp)) {
2909                ql_log(ql_log_fatal, base_vha, 0x003d,
2910                    "Failed to allocate memory for queue pointers, "
2911                    "aborting.\n");
2912                goto probe_init_failed;
2913        }
2914
2915        if (ha->mqenable && shost_use_blk_mq(host)) {
2916                /* number of hardware queues supported by blk/scsi-mq */
2917                host->nr_hw_queues = ha->max_qpairs;
2918
2919                ql_dbg(ql_dbg_init, base_vha, 0x0192,
2920                        "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
2921        } else
2922                ql_dbg(ql_dbg_init, base_vha, 0x0193,
2923                        "blk/scsi-mq disabled.\n");
2924
2925        qlt_probe_one_stage1(base_vha, ha);
2926
2927        pci_save_state(pdev);
2928
2929        /* Assign back pointers */
2930        rsp->req = req;
2931        req->rsp = rsp;
2932
2933        if (IS_QLAFX00(ha)) {
2934                ha->rsp_q_map[0] = rsp;
2935                ha->req_q_map[0] = req;
2936                set_bit(0, ha->req_qid_map);
2937                set_bit(0, ha->rsp_qid_map);
2938        }
2939
2940        /* FWI2-capable defaults; overridden below for MQ, ISPFX00 and P3P. */
2941        req->req_q_in = &ha->iobase->isp24.req_q_in;
2942        req->req_q_out = &ha->iobase->isp24.req_q_out;
2943        rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
2944        rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
2945        if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2946                req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
2947                req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
2948                rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
2949                rsp->rsp_q_out =  &ha->mqiobase->isp25mq.rsp_q_out;
2950        }
2951
2952        if (IS_QLAFX00(ha)) {
2953                req->req_q_in = &ha->iobase->ispfx00.req_q_in;
2954                req->req_q_out = &ha->iobase->ispfx00.req_q_out;
2955                rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
2956                rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
2957        }
2958
2959        if (IS_P3P_TYPE(ha)) {
2960                req->req_q_out = &ha->iobase->isp82.req_q_out[0];
2961                rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
2962                rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2963        }
2964
2965        ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
2966            "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2967            ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2968        ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
2969            "req->req_q_in=%p req->req_q_out=%p "
2970            "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2971            req->req_q_in, req->req_q_out,
2972            rsp->rsp_q_in, rsp->rsp_q_out);
2973        ql_dbg(ql_dbg_init, base_vha, 0x003e,
2974            "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2975            ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2976        ql_dbg(ql_dbg_init, base_vha, 0x003f,
2977            "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2978            req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
2979
2980        if (ha->isp_ops->initialize_adapter(base_vha)) {
2981                ql_log(ql_log_fatal, base_vha, 0x00d6,
2982                    "Failed to initialize adapter - Adapter flags %x.\n",
2983                    base_vha->device_flags);
2984
2985                if (IS_QLA82XX(ha)) {
2986                        qla82xx_idc_lock(ha);
2987                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2988                                QLA8XXX_DEV_FAILED);
2989                        qla82xx_idc_unlock(ha);
2990                        ql_log(ql_log_fatal, base_vha, 0x00d7,
2991                            "HW State: FAILED.\n");
2992                } else if (IS_QLA8044(ha)) {
2993                        qla8044_idc_lock(ha);
2994                        qla8044_wr_direct(base_vha,
2995                                QLA8044_CRB_DEV_STATE_INDEX,
2996                                QLA8XXX_DEV_FAILED);
2997                        qla8044_idc_unlock(ha);
2998                        ql_log(ql_log_fatal, base_vha, 0x0150,
2999                            "HW State: FAILED.\n");
3000                }
3001
3002                ret = -ENODEV;
3003                goto probe_failed;
3004        }
3005
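    /*
     * can_queue is derived from the number of outstanding commands the
     * firmware supports; a few slots are held back, presumably for
     * driver-internal (non-midlayer) requests.
     */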
3006        if (IS_QLAFX00(ha))
3007                host->can_queue = QLAFX00_MAX_CANQUEUE;
3008        else
3009                host->can_queue = req->num_outstanding_cmds - 10;
3010
3011        ql_dbg(ql_dbg_init, base_vha, 0x0032,
3012            "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
3013            host->can_queue, base_vha->req,
3014            base_vha->mgmt_svr_loop_id, host->sg_tablesize);
3015
3016        if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
3017                ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
3018                /* Create start of day qpairs for Block MQ */
3019                if (shost_use_blk_mq(host)) {
3020                        for (i = 0; i < ha->max_qpairs; i++)
3021                                qla2xxx_create_qpair(base_vha, 5, 0);
3022                }
3023        }
3024
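    /*
     * If the board came up on the limited "golden" (recovery) firmware,
     * skip starting the DPC thread and the related work queues below.
     */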
3025        if (ha->flags.running_gold_fw)
3026                goto skip_dpc;
3027
3028        /*
3029         * Startup the kernel thread for this host adapter
3030         */
3031        ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
3032            "%s_dpc", base_vha->host_str);
3033        if (IS_ERR(ha->dpc_thread)) {
3034                ql_log(ql_log_fatal, base_vha, 0x00ed,
3035                    "Failed to start DPC thread.\n");
3036                ret = PTR_ERR(ha->dpc_thread);
3037                goto probe_failed;
3038        }
3039        ql_dbg(ql_dbg_init, base_vha, 0x00ee,
3040            "DPC thread started successfully.\n");
3041
3042        /*
3043         * If we're not coming up in initiator mode, we might sit for
3044         * a while without waking up the dpc thread, which leads to a
3045         * stuck process warning.  So just kick the dpc once here and
3046         * let the kthread start (and go back to sleep in qla2x00_do_dpc).
3047         */
3048        qla2xxx_wake_dpc(base_vha);
3049
3050        INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
3051
3052        if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
3053                sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
3054                ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
3055                INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
3056
3057                sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
3058                ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
3059                INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
3060                INIT_WORK(&ha->idc_state_handler,
3061                    qla83xx_idc_state_handler_work);
3062                INIT_WORK(&ha->nic_core_unrecoverable,
3063                    qla83xx_nic_core_unrecoverable_work);
3064        }
3065
3066skip_dpc:
3067        list_add_tail(&base_vha->list, &ha->vp_list);
3068        base_vha->host->irq = ha->pdev->irq;
3069
3070        /* Initialize the timer. */
3071        qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
3072        ql_dbg(ql_dbg_init, base_vha, 0x00ef,
3073            "Started qla2x00_timer with "
3074            "interval=%d.\n", WATCH_INTERVAL);
3075        ql_dbg(ql_dbg_init, base_vha, 0x00f0,
3076            "Detected hba at address=%p.\n",
3077            ha);
3078
3079        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
3080                if (ha->fw_attributes & BIT_4) {
3081                        int prot = 0, guard;
3082                        base_vha->flags.difdix_supported = 1;
3083                        ql_dbg(ql_dbg_init, base_vha, 0x00f1,
3084                            "Registering for DIF/DIX type 1 and 3 protection.\n");
3085                        if (ql2xenabledif == 1)
3086                                prot = SHOST_DIX_TYPE0_PROTECTION;
3087                        scsi_host_set_prot(host,
3088                            prot | SHOST_DIF_TYPE1_PROTECTION
3089                            | SHOST_DIF_TYPE2_PROTECTION
3090                            | SHOST_DIF_TYPE3_PROTECTION
3091                            | SHOST_DIX_TYPE1_PROTECTION
3092                            | SHOST_DIX_TYPE2_PROTECTION
3093                            | SHOST_DIX_TYPE3_PROTECTION);
3094
3095                        guard = SHOST_DIX_GUARD_CRC;
3096
3097                        if (IS_PI_IPGUARD_CAPABLE(ha) &&
3098                            (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
3099                                guard |= SHOST_DIX_GUARD_IP;
3100
3101                        scsi_host_set_guard(host, guard);
3102                } else
3103                        base_vha->flags.difdix_supported = 0;
3104        }
3105
3106        ha->isp_ops->enable_intrs(ha);
3107
3108        if (IS_QLAFX00(ha)) {
3109                ret = qlafx00_fx_disc(base_vha,
3110                        &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
3111                host->sg_tablesize = (ha->mr.extended_io_enabled) ?
3112                    QLA_SG_ALL : 128;
3113        }
3114
3115        ret = scsi_add_host(host, &pdev->dev);
3116        if (ret)
3117                goto probe_failed;
3118
3119        base_vha->flags.init_done = 1;
3120        base_vha->flags.online = 1;
3121        ha->prev_minidump_failed = 0;
3122
3123        ql_dbg(ql_dbg_init, base_vha, 0x00f2,
3124            "Init done and hba is online.\n");
3125
3126        if (qla_ini_mode_enabled(base_vha))
3127                scsi_scan_host(host);
3128        else
3129                ql_dbg(ql_dbg_init, base_vha, 0x0122,
3130                        "skipping scsi_scan_host() for non-initiator port\n");
3131
3132        qla2x00_alloc_sysfs_attr(base_vha);
3133
3134        if (IS_QLAFX00(ha)) {
3135                ret = qlafx00_fx_disc(base_vha,
3136                        &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
3137
3138                /* Register system information */
3139                ret =  qlafx00_fx_disc(base_vha,
3140                        &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
3141        }
3142
3143        qla2x00_init_host_attr(base_vha);
3144
3145        qla2x00_dfs_setup(base_vha);
3146
3147        ql_log(ql_log_info, base_vha, 0x00fb,
3148            "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
3149        ql_log(ql_log_info, base_vha, 0x00fc,
3150            "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
3151            pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
3152            pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
3153            base_vha->host_no,
3154            ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
3155
3156        qlt_add_target(ha, base_vha);
3157
3158        clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
3159
3160        if (test_bit(UNLOADING, &base_vha->dpc_flags))
3161                return -ENODEV;
3162
3163        return 0;
3164
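    /*
     * Error unwind: each label below undoes the work performed after the
     * corresponding point in the probe path, roughly in reverse order.
     */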
3165probe_init_failed:
3166        qla2x00_free_req_que(ha, req);
3167        ha->req_q_map[0] = NULL;
3168        clear_bit(0, ha->req_qid_map);
3169        qla2x00_free_rsp_que(ha, rsp);
3170        ha->rsp_q_map[0] = NULL;
3171        clear_bit(0, ha->rsp_qid_map);
3172        ha->max_req_queues = ha->max_rsp_queues = 0;
3173
3174probe_failed:
3175        if (base_vha->timer_active)
3176                qla2x00_stop_timer(base_vha);
3177        base_vha->flags.online = 0;
3178        if (ha->dpc_thread) {
3179                struct task_struct *t = ha->dpc_thread;
3180
3181                ha->dpc_thread = NULL;
3182                kthread_stop(t);
3183        }
3184
3185        qla2x00_free_device(base_vha);
3186
3187        scsi_host_put(base_vha->host);
3188
3189probe_hw_failed:
3190        qla2x00_clear_drv_active(ha);
3191
3192iospace_config_failed:
3193        if (IS_P3P_TYPE(ha)) {
3194                if (ha->nx_pcibase)
3195                        iounmap((device_reg_t *)ha->nx_pcibase);
3196                if (!ql2xdbwr)
3197                        iounmap((device_reg_t *)ha->nxdb_wr_ptr);
3198        } else {
3199                if (ha->iobase)
3200                        iounmap(ha->iobase);
3201                if (ha->cregbase)
3202                        iounmap(ha->cregbase);
3203        }
3204        pci_release_selected_regions(ha->pdev, ha->bars);
3205        kfree(ha);
3206        ha = NULL;
3207
3208probe_out:
3209        pci_disable_device(pdev);
3210        return ret;
3211}
3212
3213static void
3214qla2x00_shutdown(struct pci_dev *pdev)
3215{
3216        scsi_qla_host_t *vha;
3217        struct qla_hw_data  *ha;
3218
3219        if (!atomic_read(&pdev->enable_cnt))
3220                return;
3221
3222        vha = pci_get_drvdata(pdev);
3223        ha = vha->hw;
3224
3225        /* Notify ISPFX00 firmware */
3226        if (IS_QLAFX00(ha))
3227                qlafx00_driver_shutdown(vha, 20);
3228
3229        /* Turn-off FCE trace */
3230        if (ha->flags.fce_enabled) {
3231                qla2x00_disable_fce_trace(vha, NULL, NULL);
3232                ha->flags.fce_enabled = 0;
3233        }
3234
3235        /* Turn-off EFT trace */
3236        if (ha->eft)
3237                qla2x00_disable_eft_trace(vha);
3238
3239        /* Stop currently executing firmware. */
3240        qla2x00_try_to_stop_firmware(vha);
3241
3242        /* Turn adapter off line */
3243        vha->flags.online = 0;
3244
3245        /* turn-off interrupts on the card */
3246        if (ha->interrupts_on) {
3247                vha->flags.init_done = 0;
3248                ha->isp_ops->disable_intrs(ha);
3249        }
3250
3251        qla2x00_free_irqs(vha);
3252
3253        qla2x00_free_fw_dump(ha);
3254
3255        pci_disable_pcie_error_reporting(pdev);
3256        pci_disable_device(pdev);
3257}
3258
3259/* Deletes all the virtual ports for a given ha */
3260static void
3261qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
3262{
3263        scsi_qla_host_t *vha;
3264        unsigned long flags;
3265
3266        mutex_lock(&ha->vport_lock);
3267        while (ha->cur_vport_count) {
3268                spin_lock_irqsave(&ha->vport_slock, flags);
3269
3270                BUG_ON(base_vha->list.next == &ha->vp_list);
3271                /* This assumes first entry in ha->vp_list is always base vha */
3272                vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
3273                scsi_host_get(vha->host);
3274
3275                spin_unlock_irqrestore(&ha->vport_slock, flags);
3276                mutex_unlock(&ha->vport_lock);
3277
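                /*
                 * Both locks are dropped here because fc_vport_terminate()
                 * can sleep and re-enters the driver's vport delete path,
                 * which takes vport_lock itself.
                 */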
3278                fc_vport_terminate(vha->fc_vport);
3279                scsi_host_put(vha->host);
3280
3281                mutex_lock(&ha->vport_lock);
3282        }
3283        mutex_unlock(&ha->vport_lock);
3284}
3285
3286/* Stops all deferred work threads */
3287static void
3288qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
3289{
3290        /* Cancel all work and destroy DPC workqueues */
3291        if (ha->dpc_lp_wq) {
3292                cancel_work_sync(&ha->idc_aen);
3293                destroy_workqueue(ha->dpc_lp_wq);
3294                ha->dpc_lp_wq = NULL;
3295        }
3296
3297        if (ha->dpc_hp_wq) {
3298                cancel_work_sync(&ha->nic_core_reset);
3299                cancel_work_sync(&ha->idc_state_handler);
3300                cancel_work_sync(&ha->nic_core_unrecoverable);
3301                destroy_workqueue(ha->dpc_hp_wq);
3302                ha->dpc_hp_wq = NULL;
3303        }
3304
3305        /* Kill the kernel thread for this host */
3306        if (ha->dpc_thread) {
3307                struct task_struct *t = ha->dpc_thread;
3308
3309                /*
3310                 * qla2xxx_wake_dpc checks for ->dpc_thread
3311                 * so we need to zero it out.
3312                 */
3313                ha->dpc_thread = NULL;
3314                kthread_stop(t);
3315        }
3316}
3317
3318static void
3319qla2x00_unmap_iobases(struct qla_hw_data *ha)
3320{
3321        if (IS_QLA82XX(ha)) {
3322
3323                iounmap((device_reg_t *)ha->nx_pcibase);
3324                if (!ql2xdbwr)
3325                        iounmap((device_reg_t *)ha->nxdb_wr_ptr);
3326        } else {
3327                if (ha->iobase)
3328                        iounmap(ha->iobase);
3329
3330                if (ha->cregbase)
3331                        iounmap(ha->cregbase);
3332
3333                if (ha->mqiobase)
3334                        iounmap(ha->mqiobase);
3335
3336                if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
3337                        iounmap(ha->msixbase);
3338        }
3339}
3340
3341static void
3342qla2x00_clear_drv_active(struct qla_hw_data *ha)
3343{
3344        if (IS_QLA8044(ha)) {
3345                qla8044_idc_lock(ha);
3346                qla8044_clear_drv_active(ha);
3347                qla8044_idc_unlock(ha);
3348        } else if (IS_QLA82XX(ha)) {
3349                qla82xx_idc_lock(ha);
3350                qla82xx_clear_drv_active(ha);
3351                qla82xx_idc_unlock(ha);
3352        }
3353}
3354
3355static void
3356qla2x00_remove_one(struct pci_dev *pdev)
3357{
3358        scsi_qla_host_t *base_vha;
3359        struct qla_hw_data  *ha;
3360
3361        base_vha = pci_get_drvdata(pdev);
3362        ha = base_vha->hw;
3363
3364        /* Indicate device removal to prevent future board_disable and wait
3365         * until any pending board_disable has completed. */
3366        set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
3367        cancel_work_sync(&ha->board_disable);
3368
3369        /*
3370         * If the PCI device is disabled then there was a PCI-disconnect and
3371         * qla2x00_disable_board_on_pci_error has taken care of most of the
3372         * resources.
3373         */
3374        if (!atomic_read(&pdev->enable_cnt)) {
3375                scsi_host_put(base_vha->host);
3376                kfree(ha);
3377                pci_set_drvdata(pdev, NULL);
3378                return;
3379        }
3380
3381        qla2x00_wait_for_hba_ready(base_vha);
3382
3383        /* If the UNLOADING flag is already set, let the code path that
3384         * set it first finish the unload.
3385         */
3386        if (test_bit(UNLOADING, &base_vha->dpc_flags))
3387                return;
3388
3389        set_bit(UNLOADING, &base_vha->dpc_flags);
3390
3391        if (IS_QLAFX00(ha))
3392                qlafx00_driver_shutdown(base_vha, 20);
3393
3394        qla2x00_delete_all_vps(ha, base_vha);
3395
3396        if (IS_QLA8031(ha)) {
3397                ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
3398                    "Clearing fcoe driver presence.\n");
3399                if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
3400                        ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
3401                            "Error while clearing DRV-Presence.\n");
3402        }
3403
3404        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
3405
3406        qla2x00_dfs_remove(base_vha);
3407
3408        qla84xx_put_chip(base_vha);
3409
3410        /* Laser should be disabled only for ISP2031 */
3411        if (IS_QLA2031(ha))
3412                qla83xx_disable_laser(base_vha);
3413
3414        /* Disable timer */
3415        if (base_vha->timer_active)
3416                qla2x00_stop_timer(base_vha);
3417
3418        base_vha->flags.online = 0;
3419
3420        /* Free DMA memory used for extended logins */
3421        if (ha->exlogin_buf)
3422                qla2x00_free_exlogin_buffer(ha);
3423
3424        /* Free DMA memory used for exchange offload */
3425        if (ha->exchoffld_buf)
3426                qla2x00_free_exchoffld_buffer(ha);
3427
3428        qla2x00_destroy_deferred_work(ha);
3429
3430        qlt_remove_target(ha, base_vha);
3431
3432        qla2x00_free_sysfs_attr(base_vha, true);
3433
3434        fc_remove_host(base_vha->host);
3435
3436        scsi_remove_host(base_vha->host);
3437
3438        qla2x00_free_device(base_vha);
3439
3440        qla2x00_clear_drv_active(ha);
3441
3442        scsi_host_put(base_vha->host);
3443
3444        qla2x00_unmap_iobases(ha);
3445
3446        pci_release_selected_regions(ha->pdev, ha->bars);
3447        kfree(ha);
3448        ha = NULL;
3449
3450        pci_disable_pcie_error_reporting(pdev);
3451
3452        pci_disable_device(pdev);
3453}
3454
3455static void
3456qla2x00_free_device(scsi_qla_host_t *vha)
3457{
3458        struct qla_hw_data *ha = vha->hw;
3459
3460        qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3461
3462        /* Disable timer */
3463        if (vha->timer_active)
3464                qla2x00_stop_timer(vha);
3465
3466        qla25xx_delete_queues(vha);
3467
3468        if (ha->flags.fce_enabled)
3469                qla2x00_disable_fce_trace(vha, NULL, NULL);
3470
3471        if (ha->eft)
3472                qla2x00_disable_eft_trace(vha);
3473
3474        /* Stop currently executing firmware. */
3475        qla2x00_try_to_stop_firmware(vha);
3476
3477        vha->flags.online = 0;
3478
3479        /* turn-off interrupts on the card */
3480        if (ha->interrupts_on) {
3481                vha->flags.init_done = 0;
3482                ha->isp_ops->disable_intrs(ha);
3483        }
3484
3485        qla2x00_free_fcports(vha);
3486
3487        qla2x00_free_irqs(vha);
3488
3489        /* Flush the work queue and remove it */
3490        if (ha->wq) {
3491                flush_workqueue(ha->wq);
3492                destroy_workqueue(ha->wq);
3493                ha->wq = NULL;
3494        }
3495
3496
3497        qla2x00_mem_free(ha);
3498
3499        qla82xx_md_free(vha);
3500
3501        qla2x00_free_queues(ha);
3502}
3503
3504void qla2x00_free_fcports(struct scsi_qla_host *vha)
3505{
3506        fc_port_t *fcport, *tfcport;
3507
3508        list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
3509                list_del(&fcport->list);
3510                qla2x00_clear_loop_id(fcport);
3511                kfree(fcport);
3512                fcport = NULL;
3513        }
3514}
3515
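/*
 * qla2x00_schedule_rport_del
 *      Removes the fc_rport associated with an fcport.  With defer set, the
 *      rport is parked in fcport->drport and FCPORT_UPDATE_NEEDED is raised
 *      so the removal is finished later from the DPC thread; otherwise the
 *      rport is deleted immediately and the target code is notified.
 */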
3516static inline void
3517qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
3518    int defer)
3519{
3520        struct fc_rport *rport;
3521        scsi_qla_host_t *base_vha;
3522        unsigned long flags;
3523
3524        if (!fcport->rport)
3525                return;
3526
3527        rport = fcport->rport;
3528        if (defer) {
3529                base_vha = pci_get_drvdata(vha->hw->pdev);
3530                spin_lock_irqsave(vha->host->host_lock, flags);
3531                fcport->drport = rport;
3532                spin_unlock_irqrestore(vha->host->host_lock, flags);
3533                qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
3534                set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3535                qla2xxx_wake_dpc(base_vha);
3536        } else {
3537                int now;
3538                if (rport)
3539                        fc_remote_port_delete(rport);
3540                qlt_do_generation_tick(vha, &now);
3541                qlt_fc_port_deleted(vha, fcport, now);
3542        }
3543}
3544
3545/*
3546 * qla2x00_mark_device_lost
3547 *      Updates fcport state when the device goes offline.
3548 *
3549 * Input: vha = adapter block pointer.  fcport = port structure pointer.
3550 *        do_login = schedule relogin retries when the port is lost.
3551 *        defer = defer rport removal to the DPC thread.
3552 * Return: None.
3553 */
3554void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
3555    int do_login, int defer)
3556{
3557        if (IS_QLAFX00(vha->hw)) {
3558                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
3559                qla2x00_schedule_rport_del(vha, fcport, defer);
3560                return;
3561        }
3562
3563        if (atomic_read(&fcport->state) == FCS_ONLINE &&
3564            vha->vp_idx == fcport->vha->vp_idx) {
3565                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
3566                qla2x00_schedule_rport_del(vha, fcport, defer);
3567        }
3568        /*
3569         * Unless the port is already DEAD, mark it LOST; the relogin
3570         * retries scheduled below (when do_login is set) may bring it back.
3571         */
3572        if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
3573                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
3574
3575        if (!do_login)
3576                return;
3577
3578        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3579
3580        if (fcport->login_retry == 0) {
3581                fcport->login_retry = vha->hw->login_retry_count;
3582
3583                ql_dbg(ql_dbg_disc, vha, 0x2067,
3584                    "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
3585                    fcport->port_name, fcport->loop_id, fcport->login_retry);
3586        }
3587}
3588
3589/*
3590 * qla2x00_mark_all_devices_lost
3591 *      Updates fcport state when device goes offline.
3592 *
3593 * Input:
3594 *      vha = adapter block pointer.
3595 *      defer = non-zero to defer rport removal to the DPC thread.
3596 *
3597 * Return:
3598 *      None.
3599 *
3600 * Context:
3601 */
3602void
3603qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
3604{
3605        fc_port_t *fcport;
3606
3607        list_for_each_entry(fcport, &vha->vp_fcports, list) {
3608                if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
3609                        continue;
3610
3611                /*
3612                 * No point in marking the device as lost if it is
3613                 * already DEAD.
3614                 */
3615                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
3616                        continue;
3617                if (atomic_read(&fcport->state) == FCS_ONLINE) {
3618                        qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
3619                        if (defer)
3620                                qla2x00_schedule_rport_del(vha, fcport, defer);
3621                        else if (vha->vp_idx == fcport->vha->vp_idx)
3622                                qla2x00_schedule_rport_del(vha, fcport, defer);
3623                }
3624        }
3625}
3626
3627/*
3628* qla2x00_mem_alloc
3629*      Allocates adapter memory.
3630*
3631* Returns:
3632*      0  = success.
3633*      !0  = failure.
3634*/
3635static int
3636qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3637        struct req_que **req, struct rsp_que **rsp)
3638{
3639        char    name[16];
3640
3641        ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
3642                &ha->init_cb_dma, GFP_KERNEL);
3643        if (!ha->init_cb)
3644                goto fail;
3645
3646        if (qlt_mem_alloc(ha) < 0)
3647                goto fail_free_init_cb;
3648
3649        ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
3650                qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
3651        if (!ha->gid_list)
3652                goto fail_free_tgt_mem;
3653
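        /*
         * The SRB mempool is backed by the global srb slab cache; it keeps a
         * reserve of SRB_MIN_REQ objects so SRB allocation can still make
         * progress under memory pressure.
         */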
3654        ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
3655        if (!ha->srb_mempool)
3656                goto fail_free_gid_list;
3657
3658        if (IS_P3P_TYPE(ha)) {
3659                /* Allocate cache for CT6 Ctx. */
3660                if (!ctx_cachep) {
3661                        ctx_cachep = kmem_cache_create("qla2xxx_ctx",
3662                                sizeof(struct ct6_dsd), 0,
3663                                SLAB_HWCACHE_ALIGN, NULL);
3664                        if (!ctx_cachep)
3665                                goto fail_free_srb_mempool;
3666                }
3667                ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
3668                        ctx_cachep);
3669                if (!ha->ctx_mempool)
3670                        goto fail_free_srb_mempool;
3671                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
3672                    "ctx_cachep=%p ctx_mempool=%p.\n",
3673                    ctx_cachep, ha->ctx_mempool);
3674        }
3675
3676        /* Get memory for cached NVRAM */
3677        ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
3678        if (!ha->nvram)
3679                goto fail_free_ctx_mempool;
3680
3681        snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
3682                ha->pdev->device);
3683        ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
3684                DMA_POOL_SIZE, 8, 0);
3685        if (!ha->s_dma_pool)
3686                goto fail_free_nvram;
3687
3688        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
3689            "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
3690            ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
3691
3692        if (IS_P3P_TYPE(ha) || ql2xenabledif) {
3693                ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
3694                        DSD_LIST_DMA_POOL_SIZE, 8, 0);
3695                if (!ha->dl_dma_pool) {
3696                        ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
3697                            "Failed to allocate memory for dl_dma_pool.\n");
3698                        goto fail_s_dma_pool;
3699                }
3700
3701                ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
3702                        FCP_CMND_DMA_POOL_SIZE, 8, 0);
3703                if (!ha->fcp_cmnd_dma_pool) {
3704                        ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
3705                            "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
3706                        goto fail_dl_dma_pool;
3707                }
3708                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
3709                    "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
3710                    ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
3711        }
3712
3713        /* Allocate memory for SNS commands */
3714        if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3715        /* Get consistent memory allocated for SNS commands */
3716                ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
3717                sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
3718                if (!ha->sns_cmd)
3719                        goto fail_dma_pool;
3720                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
3721                    "sns_cmd: %p.\n", ha->sns_cmd);
3722        } else {
3723        /* Get consistent memory allocated for MS IOCB */
3724                ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
3725                        &ha->ms_iocb_dma);
3726                if (!ha->ms_iocb)
3727                        goto fail_dma_pool;
3728        /* Get consistent memory allocated for CT SNS commands */
3729                ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
3730                        sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
3731                if (!ha->ct_sns)
3732                        goto fail_free_ms_iocb;
3733                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
3734                    "ms_iocb=%p ct_sns=%p.\n",
3735                    ha->ms_iocb, ha->ct_sns);
3736        }
3737
3738        /* Allocate memory for request ring */
3739        *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
3740        if (!*req) {
3741                ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
3742                    "Failed to allocate memory for req.\n");
3743                goto fail_req;
3744        }
3745        (*req)->length = req_len;
3746        (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
3747                ((*req)->length + 1) * sizeof(request_t),
3748                &(*req)->dma, GFP_KERNEL);
3749        if (!(*req)->ring) {
3750                ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
3751                    "Failed to allocate memory for req_ring.\n");
3752                goto fail_req_ring;
3753        }
3754        /* Allocate memory for response ring */
3755        *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
3756        if (!*rsp) {
3757                ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
3758                    "Failed to allocate memory for rsp.\n");
3759                goto fail_rsp;
3760        }
3761        (*rsp)->hw = ha;
3762        (*rsp)->length = rsp_len;
3763        (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
3764                ((*rsp)->length + 1) * sizeof(response_t),
3765                &(*rsp)->dma, GFP_KERNEL);
3766        if (!(*rsp)->ring) {
3767                ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
3768                    "Failed to allocate memory for rsp_ring.\n");
3769                goto fail_rsp_ring;
3770        }
3771        (*req)->rsp = *rsp;
3772        (*rsp)->req = *req;
3773        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
3774            "req=%p req->length=%d req->ring=%p rsp=%p "
3775            "rsp->length=%d rsp->ring=%p.\n",
3776            *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
3777            (*rsp)->ring);
3778        /* Allocate memory for NVRAM data for vports */
3779        if (ha->nvram_npiv_size) {
3780                ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
3781                    ha->nvram_npiv_size, GFP_KERNEL);
3782                if (!ha->npiv_info) {
3783                        ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
3784                            "Failed to allocate memory for npiv_info.\n");
3785                        goto fail_npiv_info;
3786                }
3787        } else
3788                ha->npiv_info = NULL;
3789
3790        /* Get consistent memory allocated for EX-INIT-CB. */
3791        if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
3792                ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
3793                    &ha->ex_init_cb_dma);
3794                if (!ha->ex_init_cb)
3795                        goto fail_ex_init_cb;
3796                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
3797                    "ex_init_cb=%p.\n", ha->ex_init_cb);
3798        }
3799
3800        INIT_LIST_HEAD(&ha->gbl_dsd_list);
3801
3802        /* Get consistent memory allocated for Async Port-Database. */
3803        if (!IS_FWI2_CAPABLE(ha)) {
3804                ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
3805                        &ha->async_pd_dma);
3806                if (!ha->async_pd)
3807                        goto fail_async_pd;
3808                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
3809                    "async_pd=%p.\n", ha->async_pd);
3810        }
3811
3812        INIT_LIST_HEAD(&ha->vp_list);
3813
3814        /* Allocate memory for our loop_id bitmap */
3815        ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
3816            GFP_KERNEL);
3817        if (!ha->loop_id_map)
3818                goto fail_loop_id_map;
3819        else {
3820                qla2x00_set_reserved_loop_ids(ha);
3821                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
3822                    "loop_id_map=%p.\n", ha->loop_id_map);
3823        }
3824
3825        return 0;
3826
3827fail_loop_id_map:
3828        dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
3829fail_async_pd:
3830        dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
3831fail_ex_init_cb:
3832        kfree(ha->npiv_info);
3833fail_npiv_info:
3834        dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
3835                sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
3836        (*rsp)->ring = NULL;
3837        (*rsp)->dma = 0;
3838fail_rsp_ring:
3839        kfree(*rsp);
3840fail_rsp:
3841        dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
3842                sizeof(request_t), (*req)->ring, (*req)->dma);
3843        (*req)->ring = NULL;
3844        (*req)->dma = 0;
3845fail_req_ring:
3846        kfree(*req);
3847fail_req:
3848        dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
3849                ha->ct_sns, ha->ct_sns_dma);
3850        ha->ct_sns = NULL;
3851        ha->ct_sns_dma = 0;
3852fail_free_ms_iocb:
3853        dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
3854        ha->ms_iocb = NULL;
3855        ha->ms_iocb_dma = 0;
3856
3857        if (ha->sns_cmd)
3858                dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
3859                    ha->sns_cmd, ha->sns_cmd_dma);
3860fail_dma_pool:
3861        if (IS_P3P_TYPE(ha) || ql2xenabledif) {
3862                dma_pool_destroy(ha->fcp_cmnd_dma_pool);
3863                ha->fcp_cmnd_dma_pool = NULL;
3864        }
3865fail_dl_dma_pool:
3866        if (IS_P3P_TYPE(ha) || ql2xenabledif) {
3867                dma_pool_destroy(ha->dl_dma_pool);
3868                ha->dl_dma_pool = NULL;
3869        }
3870fail_s_dma_pool:
3871        dma_pool_destroy(ha->s_dma_pool);
3872        ha->s_dma_pool = NULL;
3873fail_free_nvram:
3874        kfree(ha->nvram);
3875        ha->nvram = NULL;
3876fail_free_ctx_mempool:
3877        if (ha->ctx_mempool)
3878                mempool_destroy(ha->ctx_mempool);
3879        ha->ctx_mempool = NULL;
3880fail_free_srb_mempool:
3881        if (ha->srb_mempool)
3882                mempool_destroy(ha->srb_mempool);
3883        ha->srb_mempool = NULL;
3884fail_free_gid_list:
3885        dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
3886        ha->gid_list,
3887        ha->gid_list_dma);
3888        ha->gid_list = NULL;
3889        ha->gid_list_dma = 0;
3890fail_free_tgt_mem:
3891        qlt_mem_free(ha);
3892fail_free_init_cb:
3893        dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
3894        ha->init_cb_dma);
3895        ha->init_cb = NULL;
3896        ha->init_cb_dma = 0;
3897fail:
3898        ql_log(ql_log_fatal, NULL, 0x0030,
3899            "Memory allocation failure.\n");
3900        return -ENOMEM;
3901}
3902
3903int
3904qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
3905{
3906        int rval;
3907        uint16_t        size, max_cnt, temp;
3908        struct qla_hw_data *ha = vha->hw;
3909
3910        /* Return if we don't need to allocate any extended logins */
3911        if (!ql2xexlogins)
3912                return QLA_SUCCESS;
3913
3914        ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
3915        max_cnt = 0;
3916        rval = qla_get_exlogin_status(vha, &size, &max_cnt);
3917        if (rval != QLA_SUCCESS) {
3918                ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
3919                    "Failed to get exlogin status.\n");
3920                return rval;
3921        }
3922
3923        temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
3924        ha->exlogin_size = (size * temp);
3925        ql_log(ql_log_info, vha, 0xd024,
3926                "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
3927                max_cnt, size, temp);
3928
3929        ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n",
3930                ha->exlogin_size);
3931
3932        /* Get consistent memory for extended logins */
3933        ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
3934            ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
3935        if (!ha->exlogin_buf) {
3936                ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
3937                    "Failed to allocate memory for exlogin_buf_dma.\n");
3938                return -ENOMEM;
3939        }
3940
3941        /* Now configure the dma buffer */
3942        rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
3943        if (rval) {
3944                ql_log(ql_log_fatal, vha, 0x00cf,
3945                    "Setup extended login buffer ****FAILED****.\n");
3946                qla2x00_free_exlogin_buffer(ha);
3947        }
3948
3949        return rval;
3950}
3951
3952/*
3953* qla2x00_free_exlogin_buffer
3954*
3955* Input:
3956*       ha = adapter block pointer
3957*/
3958void
3959qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
3960{
3961        if (ha->exlogin_buf) {
3962                dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
3963                    ha->exlogin_buf, ha->exlogin_buf_dma);
3964                ha->exlogin_buf = NULL;
3965                ha->exlogin_size = 0;
3966        }
3967}
3968
3969int
3970qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
3971{
3972        int rval;
3973        uint16_t        size, max_cnt, temp;
3974        struct qla_hw_data *ha = vha->hw;
3975
3976        /* Return if we don't need to allocate any exchange offload buffers */
3977        if (!ql2xexchoffld)
3978                return QLA_SUCCESS;
3979
3980        ql_log(ql_log_info, vha, 0xd014,
3981            "Exchange offload count: %d.\n", ql2xexchoffld);
3982
3983        max_cnt = 0;
3984        rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
3985        if (rval != QLA_SUCCESS) {
3986                ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
3987                    "Failed to get exchange offload status.\n");
3988                return rval;
3989        }
3990
3991        temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld;
3992        ha->exchoffld_size = (size * temp);
3993        ql_log(ql_log_info, vha, 0xd016,
3994                "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
3995                max_cnt, size, temp);
3996
3997        ql_log(ql_log_info, vha, 0xd017,
3998            "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size);
3999
4000        /* Get consistent memory for exchange offload */
4001        ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
4002            ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
4003        if (!ha->exchoffld_buf) {
4004                ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
4005                    "Failed to allocate memory for exchoffld_buf_dma.\n");
4006                return -ENOMEM;
4007        }
4008
4009        /* Now configure the dma buffer */
4010        rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma);
4011        if (rval) {
4012                ql_log(ql_log_fatal, vha, 0xd02e,
4013                    "Setup exchange offload buffer ****FAILED****.\n");
4014                qla2x00_free_exchoffld_buffer(ha);
4015        }
4016
4017        return rval;
4018}
4019
4020/*
4021* qla2x00_free_exchoffld_buffer
4022*
4023* Input:
4024*       ha = adapter block pointer
4025*/
4026void
4027qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
4028{
4029        if (ha->exchoffld_buf) {
4030                dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
4031                    ha->exchoffld_buf, ha->exchoffld_buf_dma);
4032                ha->exchoffld_buf = NULL;
4033                ha->exchoffld_size = 0;
4034        }
4035}
4036
4037/*
4038* qla2x00_free_fw_dump
4039*       Frees fw dump stuff.
4040*
4041* Input:
4042*       ha = adapter block pointer
4043*/
4044static void
4045qla2x00_free_fw_dump(struct qla_hw_data *ha)
4046{
4047        if (ha->fce)
4048                dma_free_coherent(&ha->pdev->dev,
4049                    FCE_SIZE, ha->fce, ha->fce_dma);
4050
4051        if (ha->eft)
4052                dma_free_coherent(&ha->pdev->dev,
4053                    EFT_SIZE, ha->eft, ha->eft_dma);
4054
4055        if (ha->fw_dump)
4056                vfree(ha->fw_dump);
4057        if (ha->fw_dump_template)
4058                vfree(ha->fw_dump_template);
4059
4060        ha->fce = NULL;
4061        ha->fce_dma = 0;
4062        ha->eft = NULL;
4063        ha->eft_dma = 0;
4064        ha->fw_dumped = 0;
4065        ha->fw_dump_cap_flags = 0;
4066        ha->fw_dump_reading = 0;
4067        ha->fw_dump = NULL;
4068        ha->fw_dump_len = 0;
4069        ha->fw_dump_template = NULL;
4070        ha->fw_dump_template_len = 0;
4071}
4072
4073/*
4074* qla2x00_mem_free
4075*      Frees all adapter allocated memory.
4076*
4077* Input:
4078*      ha = adapter block pointer.
4079*/
4080static void
4081qla2x00_mem_free(struct qla_hw_data *ha)
4082{
4083        qla2x00_free_fw_dump(ha);
4084
4085        if (ha->mctp_dump)
4086                dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
4087                    ha->mctp_dump_dma);
4088
4089        if (ha->srb_mempool)
4090                mempool_destroy(ha->srb_mempool);
4091
4092        if (ha->dcbx_tlv)
4093                dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
4094                    ha->dcbx_tlv, ha->dcbx_tlv_dma);
4095
4096        if (ha->xgmac_data)
4097                dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
4098                    ha->xgmac_data, ha->xgmac_data_dma);
4099
4100        if (ha->sns_cmd)
4101                dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
4102                ha->sns_cmd, ha->sns_cmd_dma);
4103
4104        if (ha->ct_sns)
4105                dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
4106                ha->ct_sns, ha->ct_sns_dma);
4107
4108        if (ha->sfp_data)
4109                dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
4110
4111        if (ha->ms_iocb)
4112                dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
4113
4114        if (ha->ex_init_cb)
4115                dma_pool_free(ha->s_dma_pool,
4116                        ha->ex_init_cb, ha->ex_init_cb_dma);
4117
4118        if (ha->async_pd)
4119                dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
4120
4121        if (ha->s_dma_pool)
4122                dma_pool_destroy(ha->s_dma_pool);
4123
4124        if (ha->gid_list)
4125                dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
4126                ha->gid_list, ha->gid_list_dma);
4127
4128        if (IS_QLA82XX(ha)) {
4129                if (!list_empty(&ha->gbl_dsd_list)) {
4130                        struct dsd_dma *dsd_ptr, *tdsd_ptr;
4131
4132                        /* clean up allocated prev pool */
4133                        list_for_each_entry_safe(dsd_ptr,
4134                                tdsd_ptr, &ha->gbl_dsd_list, list) {
4135                                dma_pool_free(ha->dl_dma_pool,
4136                                dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
4137                                list_del(&dsd_ptr->list);
4138                                kfree(dsd_ptr);
4139                        }
4140                }
4141        }
4142
4143        if (ha->dl_dma_pool)
4144                dma_pool_destroy(ha->dl_dma_pool);
4145
4146        if (ha->fcp_cmnd_dma_pool)
4147                dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4148
4149        if (ha->ctx_mempool)
4150                mempool_destroy(ha->ctx_mempool);
4151
4152        qlt_mem_free(ha);
4153
4154        if (ha->init_cb)
4155                dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
4156                        ha->init_cb, ha->init_cb_dma);
4157        vfree(ha->optrom_buffer);
4158        kfree(ha->nvram);
4159        kfree(ha->npiv_info);
4160        kfree(ha->swl);
4161        kfree(ha->loop_id_map);
4162
4163        ha->srb_mempool = NULL;
4164        ha->ctx_mempool = NULL;
4165        ha->sns_cmd = NULL;
4166        ha->sns_cmd_dma = 0;
4167        ha->ct_sns = NULL;
4168        ha->ct_sns_dma = 0;
4169        ha->ms_iocb = NULL;
4170        ha->ms_iocb_dma = 0;
4171        ha->init_cb = NULL;
4172        ha->init_cb_dma = 0;
4173        ha->ex_init_cb = NULL;
4174        ha->ex_init_cb_dma = 0;
4175        ha->async_pd = NULL;
4176        ha->async_pd_dma = 0;
4177
4178        ha->s_dma_pool = NULL;
4179        ha->dl_dma_pool = NULL;
4180        ha->fcp_cmnd_dma_pool = NULL;
4181
4182        ha->gid_list = NULL;
4183        ha->gid_list_dma = 0;
4184
4185        ha->tgt.atio_ring = NULL;
4186        ha->tgt.atio_dma = 0;
4187        ha->tgt.tgt_vp_map = NULL;
4188}
4189
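/*
* qla2x00_create_host
*      Allocates a Scsi_Host with an embedded scsi_qla_host, clears the
*      driver-private area and initializes the per-host lists and locks.
*
* Input:
*      sht = scsi host template.
*      ha = adapter block pointer.
*
* Returns:
*      Pointer to the new scsi_qla_host, or NULL on allocation failure.
*/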
4190struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4191                                                struct qla_hw_data *ha)
4192{
4193        struct Scsi_Host *host;
4194        struct scsi_qla_host *vha = NULL;
4195
4196        host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
4197        if (host == NULL) {
4198                ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
4199                    "Failed to allocate host from the scsi layer, aborting.\n");
4200                goto fail;
4201        }
4202
4203        /* Clear our data area */
4204        vha = shost_priv(host);
4205        memset(vha, 0, sizeof(scsi_qla_host_t));
4206
4207        vha->host = host;
4208        vha->host_no = host->host_no;
4209        vha->hw = ha;
4210
4211        INIT_LIST_HEAD(&vha->vp_fcports);
4212        INIT_LIST_HEAD(&vha->work_list);
4213        INIT_LIST_HEAD(&vha->list);
4214        INIT_LIST_HEAD(&vha->qla_cmd_list);
4215        INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
4216        INIT_LIST_HEAD(&vha->logo_list);
4217        INIT_LIST_HEAD(&vha->plogi_ack_list);
4218        INIT_LIST_HEAD(&vha->qp_list);
4219
4220        spin_lock_init(&vha->work_lock);
4221        spin_lock_init(&vha->cmd_list_lock);
4222
4223        sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
4224        ql_dbg(ql_dbg_init, vha, 0x0041,
4225            "Allocated the host=%p hw=%p vha=%p dev_name=%s",
4226            vha->host, vha->hw, vha,
4227            dev_name(&(ha->pdev->dev)));
4228
4229        return vha;
4230
4231fail:
4232        return vha;
4233}
4234
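/* Allocate a work event of the given type.  The vha is marked busy until
 * the event has been processed; returns NULL if the vha cannot be marked
 * busy or the allocation fails.
 */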
4235static struct qla_work_evt *
4236qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
4237{
4238        struct qla_work_evt *e;
4239        uint8_t bail;
4240
4241        QLA_VHA_MARK_BUSY(vha, bail);
4242        if (bail)
4243                return NULL;
4244
4245        e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
4246        if (!e) {
4247                QLA_VHA_MARK_NOT_BUSY(vha);
4248                return NULL;
4249        }
4250
4251        INIT_LIST_HEAD(&e->list);
4252        e->type = type;
4253        e->flags = QLA_EVT_FLAG_FREE;
4254        return e;
4255}
4256
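/* Queue a work event on the vha work list and wake the DPC thread to
 * process it.
 */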
4257static int
4258qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
4259{
4260        unsigned long flags;
4261
4262        spin_lock_irqsave(&vha->work_lock, flags);
4263        list_add_tail(&e->list, &vha->work_list);
4264        spin_unlock_irqrestore(&vha->work_lock, flags);
4265        qla2xxx_wake_dpc(vha);
4266
4267        return QLA_SUCCESS;
4268}
4269
4270int
4271qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
4272    u32 data)
4273{
4274        struct qla_work_evt *e;
4275
4276        e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
4277        if (!e)
4278                return QLA_FUNCTION_FAILED;
4279
4280        e->u.aen.code = code;
4281        e->u.aen.data = data;
4282        return qla2x00_post_work(vha, e);
4283}
4284
4285int
4286qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
4287{
4288        struct qla_work_evt *e;
4289
4290        e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
4291        if (!e)
4292                return QLA_FUNCTION_FAILED;
4293
4294        memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4295        return qla2x00_post_work(vha, e);
4296}
4297
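/* Generate qla2x00_post_async_<name>_work() helpers that queue a logio
 * work event of the given type for an fcport, optionally carrying two
 * 16-bit data words.
 */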
4298#define qla2x00_post_async_work(name, type)     \
4299int qla2x00_post_async_##name##_work(           \
4300    struct scsi_qla_host *vha,                  \
4301    fc_port_t *fcport, uint16_t *data)          \
4302{                                               \
4303        struct qla_work_evt *e;                 \
4304                                                \
4305        e = qla2x00_alloc_work(vha, type);      \
4306        if (!e)                                 \
4307                return QLA_FUNCTION_FAILED;     \
4308                                                \
4309        e->u.logio.fcport = fcport;             \
4310        if (data) {                             \
4311                e->u.logio.data[0] = data[0];   \
4312                e->u.logio.data[1] = data[1];   \
4313        }                                       \
4314        return qla2x00_post_work(vha, e);       \
4315}
4316
4317qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
4318qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
4319qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
4320qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
4321qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
4322qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
4323
4324int
4325qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
4326{
4327        struct qla_work_evt *e;
4328
4329        e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
4330        if (!e)
4331                return QLA_FUNCTION_FAILED;
4332
4333        e->u.uevent.code = code;
4334        return qla2x00_post_work(vha, e);
4335}
4336
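/* Emit a KOBJ_CHANGE uevent on the PCI device.  Only
 * QLA_UEVENT_CODE_FW_DUMP is formatted (as "FW_DUMP=<host_no>").
 */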
4337static void
4338qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
4339{
4340        char event_string[40] = "";
4341        char *envp[] = { event_string, NULL };
4342
4343        switch (code) {
4344        case QLA_UEVENT_CODE_FW_DUMP:
4345                snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
4346                    vha->host_no);
4347                break;
4348        default:
4349                /* do nothing */
4350                break;
4351        }
4352        kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
4353}
4354
4355int
4356qlafx00_post_aenfx_work(struct scsi_qla_host *vha,  uint32_t evtcode,
4357                        uint32_t *data, int cnt)
4358{
4359        struct qla_work_evt *e;
4360
4361        e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
4362        if (!e)
4363                return QLA_FUNCTION_FAILED;
4364
4365        e->u.aenfx.evtcode = evtcode;
4366        e->u.aenfx.count = cnt;
4367        memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
4368        return qla2x00_post_work(vha, e);
4369}
4370
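/* Drain the vha work list and dispatch each queued event, dropping the
 * busy reference taken when the event was allocated.
 */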
4371void
4372qla2x00_do_work(struct scsi_qla_host *vha)
4373{
4374        struct qla_work_evt *e, *tmp;
4375        unsigned long flags;
4376        LIST_HEAD(work);
4377
4378        spin_lock_irqsave(&vha->work_lock, flags);
4379        list_splice_init(&vha->work_list, &work);
4380        spin_unlock_irqrestore(&vha->work_lock, flags);
4381
4382        list_for_each_entry_safe(e, tmp, &work, list) {
4383                list_del_init(&e->list);
4384
4385                switch (e->type) {
4386                case QLA_EVT_AEN:
4387                        fc_host_post_event(vha->host, fc_get_event_number(),
4388                            e->u.aen.code, e->u.aen.data);
4389                        break;
4390                case QLA_EVT_IDC_ACK:
4391                        qla81xx_idc_ack(vha, e->u.idc_ack.mb);
4392                        break;
4393                case QLA_EVT_ASYNC_LOGIN:
4394                        qla2x00_async_login(vha, e->u.logio.fcport,
4395                            e->u.logio.data);
4396                        break;
4397                case QLA_EVT_ASYNC_LOGIN_DONE:
4398                        qla2x00_async_login_done(vha, e->u.logio.fcport,
4399                            e->u.logio.data);
4400                        break;
4401                case QLA_EVT_ASYNC_LOGOUT:
4402                        qla2x00_async_logout(vha, e->u.logio.fcport);
4403                        break;
4404                case QLA_EVT_ASYNC_LOGOUT_DONE:
4405                        qla2x00_async_logout_done(vha, e->u.logio.fcport,
4406                            e->u.logio.data);
4407                        break;
4408                case QLA_EVT_ASYNC_ADISC:
4409                        qla2x00_async_adisc(vha, e->u.logio.fcport,
4410                            e->u.logio.data);
4411                        break;
4412                case QLA_EVT_ASYNC_ADISC_DONE:
4413                        qla2x00_async_adisc_done(vha, e->u.logio.fcport,
4414                            e->u.logio.data);
4415                        break;
4416                case QLA_EVT_UEVENT:
4417                        qla2x00_uevent_emit(vha, e->u.uevent.code);
4418                        break;
4419                case QLA_EVT_AENFX:
4420                        qlafx00_process_aen(vha, e);
4421                        break;
4422                }
4423                if (e->flags & QLA_EVT_FLAG_FREE)
4424                        kfree(e);
4425
4426                /* For each work completed decrement vha ref count */
4427                QLA_VHA_MARK_NOT_BUSY(vha);
4428        }
4429}
4430
4431/* Relogin to all the fcports of a vport
4432 * Context: dpc thread
4433 */
4434void qla2x00_relogin(struct scsi_qla_host *vha)
4435{
4436        fc_port_t       *fcport;
4437        int status;
4438        uint16_t        next_loopid = 0;
4439        struct qla_hw_data *ha = vha->hw;
4440        uint16_t data[2];
4441
4442        list_for_each_entry(fcport, &vha->vp_fcports, list) {
4443                /*
4444                 * If the port is not ONLINE then try to login
4445                 * to it if we haven't run out of retries.
4446                 */
4447                if (atomic_read(&fcport->state) != FCS_ONLINE &&
4448                    fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
4449                        fcport->login_retry--;
4450                        if (fcport->flags & FCF_FABRIC_DEVICE) {
4451                                if (fcport->flags & FCF_FCP2_DEVICE)
4452                                        ha->isp_ops->fabric_logout(vha,
4453                                                        fcport->loop_id,
4454                                                        fcport->d_id.b.domain,
4455                                                        fcport->d_id.b.area,
4456                                                        fcport->d_id.b.al_pa);
4457
4458                                if (fcport->loop_id == FC_NO_LOOP_ID) {
4459                                        fcport->loop_id = next_loopid =
4460                                            ha->min_external_loopid;
4461                                        status = qla2x00_find_new_loop_id(
4462                                            vha, fcport);
4463                                        if (status != QLA_SUCCESS) {
4464                                                /* Ran out of IDs to use */
4465                                                break;
4466                                        }
4467                                }
4468
4469                                if (IS_ALOGIO_CAPABLE(ha)) {
4470                                        fcport->flags |= FCF_ASYNC_SENT;
4471                                        data[0] = 0;
4472                                        data[1] = QLA_LOGIO_LOGIN_RETRIED;
4473                                        status = qla2x00_post_async_login_work(
4474                                            vha, fcport, data);
4475                                        if (status == QLA_SUCCESS)
4476                                                continue;
4477                                        /* Attempt a retry. */
4478                                        status = 1;
4479                                } else {
4480                                        status = qla2x00_fabric_login(vha,
4481                                            fcport, &next_loopid);
4482                                        if (status ==  QLA_SUCCESS) {
4483                                                int status2;
4484                                                uint8_t opts;
4485
4486                                                opts = 0;
4487                                                if (fcport->flags &
4488                                                    FCF_FCP2_DEVICE)
4489                                                        opts |= BIT_1;
4490                                                status2 =
4491                                                    qla2x00_get_port_database(
4492                                                        vha, fcport, opts);
4493                                                if (status2 != QLA_SUCCESS)
4494                                                        status = 1;
4495                                        }
4496                                }
4497                        } else
4498                                status = qla2x00_local_device_login(vha,
4499                                                                fcport);
4500
4501                        if (status == QLA_SUCCESS) {
4502                                fcport->old_loop_id = fcport->loop_id;
4503
4504                                ql_dbg(ql_dbg_disc, vha, 0x2003,
4505                                    "Port login OK: logged in ID 0x%x.\n",
4506                                    fcport->loop_id);
4507
4508                                qla2x00_update_fcport(vha, fcport);
4509
4510                        } else if (status == 1) {
4511                                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
4512                                /* retry the login again */
4513                                ql_dbg(ql_dbg_disc, vha, 0x2007,
4514                                    "Retrying %d login again loop_id 0x%x.\n",
4515                                    fcport->login_retry, fcport->loop_id);
4516                        } else {
4517                                fcport->login_retry = 0;
4518                        }
4519
4520                        if (fcport->login_retry == 0 && status != QLA_SUCCESS)
4521                                qla2x00_clear_loop_id(fcport);
4522                }
4523                if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4524                        break;
4525        }
4526}
4527
4528/* Schedule work on any of the dpc-workqueues */
4529void
4530qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
4531{
4532        struct qla_hw_data *ha = base_vha->hw;
4533
4534        switch (work_code) {
4535        case MBA_IDC_AEN: /* 0x8200 */
4536                if (ha->dpc_lp_wq)
4537                        queue_work(ha->dpc_lp_wq, &ha->idc_aen);
4538                break;
4539
4540        case QLA83XX_NIC_CORE_RESET: /* 0x1 */
4541                if (!ha->flags.nic_core_reset_hdlr_active) {
4542                        if (ha->dpc_hp_wq)
4543                                queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
4544                } else
4545                        ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
4546                            "NIC Core reset is already active. Skip "
4547                            "scheduling it again.\n");
4548                break;
4549        case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
4550                if (ha->dpc_hp_wq)
4551                        queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
4552                break;
4553        case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
4554                if (ha->dpc_hp_wq)
4555                        queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
4556                break;
4557        default:
4558                ql_log(ql_log_warn, base_vha, 0xb05f,
4559                    "Unknown work-code=0x%x.\n", work_code);
4560        }
4561
4562        return;
4563}
4564
4565/* Work: Perform NIC Core Unrecoverable state handling */
4566void
4567qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
4568{
4569        struct qla_hw_data *ha =
4570                container_of(work, struct qla_hw_data, nic_core_unrecoverable);
4571        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
4572        uint32_t dev_state = 0;
4573
4574        qla83xx_idc_lock(base_vha, 0);
4575        qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4576        qla83xx_reset_ownership(base_vha);
4577        if (ha->flags.nic_core_reset_owner) {
4578                ha->flags.nic_core_reset_owner = 0;
4579                qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
4580                    QLA8XXX_DEV_FAILED);
4581                ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
4582                qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
4583        }
4584        qla83xx_idc_unlock(base_vha, 0);
4585}
4586
4587/* Work: Execute IDC state handler */
4588void
4589qla83xx_idc_state_handler_work(struct work_struct *work)
4590{
4591        struct qla_hw_data *ha =
4592                container_of(work, struct qla_hw_data, idc_state_handler);
4593        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
4594        uint32_t dev_state = 0;
4595
4596        qla83xx_idc_lock(base_vha, 0);
4597        qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4598        if (dev_state == QLA8XXX_DEV_FAILED ||
4599                        dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
4600                qla83xx_idc_state_handler(base_vha);
4601        qla83xx_idc_unlock(base_vha, 0);
4602}
4603
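/* Poll the NIC core firmware heartbeat register for up to one second;
 * the firmware is considered alive once the counter changes.
 */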
4604static int
4605qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
4606{
4607        int rval = QLA_SUCCESS;
4608        unsigned long heart_beat_wait = jiffies + (1 * HZ);
4609        uint32_t heart_beat_counter1, heart_beat_counter2;
4610
4611        do {
4612                if (time_after(jiffies, heart_beat_wait)) {
4613                        ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
4614                            "Nic Core f/w is not alive.\n");
4615                        rval = QLA_FUNCTION_FAILED;
4616                        break;
4617                }
4618
4619                qla83xx_idc_lock(base_vha, 0);
4620                qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
4621                    &heart_beat_counter1);
4622                qla83xx_idc_unlock(base_vha, 0);
4623                msleep(100);
4624                qla83xx_idc_lock(base_vha, 0);
4625                qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
4626                    &heart_beat_counter2);
4627                qla83xx_idc_unlock(base_vha, 0);
4628        } while (heart_beat_counter1 == heart_beat_counter2);
4629
4630        return rval;
4631}
4632
4633/* Work: Perform NIC Core Reset handling */
4634void
4635qla83xx_nic_core_reset_work(struct work_struct *work)
4636{
4637        struct qla_hw_data *ha =
4638                container_of(work, struct qla_hw_data, nic_core_reset);
4639        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
4640        uint32_t dev_state = 0;
4641
4642        if (IS_QLA2031(ha)) {
4643                if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
4644                        ql_log(ql_log_warn, base_vha, 0xb081,
4645                            "Failed to dump mctp\n");
4646                return;
4647        }
4648
4649        if (!ha->flags.nic_core_reset_hdlr_active) {
4650                if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
4651                        qla83xx_idc_lock(base_vha, 0);
4652                        qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
4653                            &dev_state);
4654                        qla83xx_idc_unlock(base_vha, 0);
4655                        if (dev_state != QLA8XXX_DEV_NEED_RESET) {
4656                                ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
4657                                    "Nic Core f/w is alive.\n");
4658                                return;
4659                        }
4660                }
4661
4662                ha->flags.nic_core_reset_hdlr_active = 1;
4663                if (qla83xx_nic_core_reset(base_vha)) {
4664                        /* NIC Core reset failed. */
4665                        ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
4666                            "NIC Core reset failed.\n");
4667                }
4668                ha->flags.nic_core_reset_hdlr_active = 0;
4669        }
4670}
4671
4672/* Work: Handle 8200 IDC aens */
4673void
4674qla83xx_service_idc_aen(struct work_struct *work)
4675{
4676        struct qla_hw_data *ha =
4677                container_of(work, struct qla_hw_data, idc_aen);
4678        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
4679        uint32_t dev_state, idc_control;
4680
4681        qla83xx_idc_lock(base_vha, 0);
4682        qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4683        qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
4684        qla83xx_idc_unlock(base_vha, 0);
4685        if (dev_state == QLA8XXX_DEV_NEED_RESET) {
4686                if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
4687                        ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
4688                            "Application requested NIC Core Reset.\n");
4689                        qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
4690                } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
4691                    QLA_SUCCESS) {
4692                        ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
4693                            "Other protocol driver requested NIC Core Reset.\n");
4694                        qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
4695                }
4696        } else if (dev_state == QLA8XXX_DEV_FAILED ||
4697                        dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
4698                qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
4699        }
4700}
4701
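/* Back off briefly between IDC lock/unlock attempts, yielding the CPU
 * when not in interrupt context.
 */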
4702static void
4703qla83xx_wait_logic(void)
4704{
4705        int i;
4706
4707        /* Yield CPU */
4708        if (!in_interrupt()) {
4709                /*
4710                 * Wait about 100ms before retrying again.
4711                 * This controls the number of retries for a single
4712                 * lock operation.
4713                 */
4714                msleep(100);
4715                schedule();
4716        } else {
4717                for (i = 0; i < 20; i++)
4718                        cpu_relax(); /* This is a nop instruction on i386 */
4719        }
4720}
4721
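/* Forcefully recover a stuck IDC lock via the lock-recovery register;
 * per the masks below, bits 0-1 carry the recovery stage and bits 2-5
 * the recovering function.  If a recovery is already in progress the
 * function returns without taking any action.
 */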
4722static int
4723qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
4724{
4725        int rval;
4726        uint32_t data;
4727        uint32_t idc_lck_rcvry_stage_mask = 0x3;
4728        uint32_t idc_lck_rcvry_owner_mask = 0x3c;
4729        struct qla_hw_data *ha = base_vha->hw;
4730        ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
4731            "Trying force recovery of the IDC lock.\n");
4732
4733        rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
4734        if (rval)
4735                return rval;
4736
4737        if ((data & idc_lck_rcvry_stage_mask) > 0) {
4738                return QLA_SUCCESS;
4739        } else {
4740                data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
4741                rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
4742                    data);
4743                if (rval)
4744                        return rval;
4745
4746                msleep(200);
4747
4748                rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
4749                    &data);
4750                if (rval)
4751                        return rval;
4752
4753                if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
4754                        data &= (IDC_LOCK_RECOVERY_STAGE2 |
4755                                        ~(idc_lck_rcvry_stage_mask));
4756                        rval = qla83xx_wr_reg(base_vha,
4757                            QLA83XX_IDC_LOCK_RECOVERY, data);
4758                        if (rval)
4759                                return rval;
4760
4761                        /* Forcefully perform IDC UnLock */
4762                        rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
4763                            &data);
4764                        if (rval)
4765                                return rval;
4766                        /* Clear lock-id by setting 0xff */
4767                        rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
4768                            0xff);
4769                        if (rval)
4770                                return rval;
4771                        /* Clear lock-recovery by setting 0x0 */
4772                        rval = qla83xx_wr_reg(base_vha,
4773                            QLA83XX_IDC_LOCK_RECOVERY, 0x0);
4774                        if (rval)
4775                                return rval;
4776                } else
4777                        return QLA_SUCCESS;
4778        }
4779
4780        return rval;
4781}
4782
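/* Watch the driver lock-id register for activity; if the owner does not
 * change within QLA83XX_MAX_LOCK_RECOVERY_WAIT, force IDC lock recovery.
 */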
4783static int
4784qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
4785{
4786        int rval = QLA_SUCCESS;
4787        uint32_t o_drv_lockid, n_drv_lockid;
4788        unsigned long lock_recovery_timeout;
4789
4790        lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
4791retry_lockid:
4792        rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
4793        if (rval)
4794                goto exit;
4795
4796        /* MAX wait time before forcing IDC Lock recovery = 2 secs */
4797        if (time_after_eq(jiffies, lock_recovery_timeout)) {
4798                if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
4799                        return QLA_SUCCESS;
4800                else
4801                        return QLA_FUNCTION_FAILED;
4802        }
4803
4804        rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
4805        if (rval)
4806                goto exit;
4807
4808        if (o_drv_lockid == n_drv_lockid) {
4809                qla83xx_wait_logic();
4810                goto retry_lockid;
4811        } else
4812                return QLA_SUCCESS;
4813
4814exit:
4815        return rval;
4816}
4817
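/* Acquire the IDC lock by reading the driver-lock semaphore register and
 * recording our function number in the lock-id register; on contention,
 * attempt lock recovery and retry.
 */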
4818void
4819qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
4820{
#if 0
4821        uint16_t options = (requester_id << 15) | BIT_6;
#endif
4822        uint32_t data;
4823        uint32_t lock_owner;
4824        struct qla_hw_data *ha = base_vha->hw;
4825
4826        /* IDC-lock implementation using driver-lock/lock-id remote registers */
4827retry_lock:
4828        if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
4829            == QLA_SUCCESS) {
4830                if (data) {
4831                        /* Setting lock-id to our function-number */
4832                        qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
4833                            ha->portnum);
4834                } else {
4835                        qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
4836                            &lock_owner);
4837                        ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
4838                            "Failed to acquire IDC lock, acquired by %d, "
4839                            "retrying...\n", lock_owner);
4840
4841                        /* Retry/Perform IDC-Lock recovery */
4842                        if (qla83xx_idc_lock_recovery(base_vha)
4843                            == QLA_SUCCESS) {
4844                                qla83xx_wait_logic();
4845                                goto retry_lock;
4846                        } else
4847                                ql_log(ql_log_warn, base_vha, 0xb075,
4848                                    "IDC Lock recovery FAILED.\n");
4849                }
4850
4851        }
4852
4853        return;
4854
#if 0
4855        /* XXX: IDC-lock implementation using access-control mbx */
4856retry_lock2:
4857        if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
4858                ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
4859                    "Failed to acquire IDC lock. retrying...\n");
4860                /* Retry/Perform IDC-Lock recovery */
4861                if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
4862                        qla83xx_wait_logic();
4863                        goto retry_lock2;
4864                } else
4865                        ql_log(ql_log_warn, base_vha, 0xb076,
4866                            "IDC Lock recovery FAILED.\n");
4867        }
4868
4869        return;
#endif
4870}
4871
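/* Release the IDC lock: if the lock-id register shows we own it, read the
 * driver-unlock register and clear the lock-id; otherwise retry a limited
 * number of times.
 */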
4872void
4873qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
4874{
4875#if 0
4876        uint16_t options = (requester_id << 15) | BIT_7;
4877#endif
4878        uint16_t retry;
4879        uint32_t data;
4880        struct qla_hw_data *ha = base_vha->hw;
4881
4882        /* IDC-unlock implementation using driver-unlock/lock-id
4883         * remote registers
4884         */
4885        retry = 0;
4886retry_unlock:
4887        if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
4888            == QLA_SUCCESS) {
4889                if (data == ha->portnum) {
4890                        qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
4891                        /* Clearing lock-id by setting 0xff */
4892                        qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
4893                } else if (retry < 10) {
4894                        /* SV: XXX: IDC unlock retrying needed here? */
4895
4896                        /* Retry for IDC-unlock */
4897                        qla83xx_wait_logic();
4898                        retry++;
4899                        ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
4900                            "Failed to release IDC lock, retrying=%d\n", retry);
4901                        goto retry_unlock;
4902                }
4903        } else if (retry < 10) {
4904                /* Retry for IDC-unlock */
4905                qla83xx_wait_logic();
4906                retry++;
4907                ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
4908                    "Failed to read drv-lockid, retrying=%d\n", retry);
4909                goto retry_unlock;
4910        }
4911
4912        return;
4913
4914#if 0
4915        /* XXX: IDC-unlock implementation using access-control mbx */
4916        retry = 0;
4917retry_unlock2:
4918        if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
4919                if (retry < 10) {
4920                        /* Retry for IDC-unlock */
4921                        qla83xx_wait_logic();
4922                        retry++;
4923                        ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
4924                            "Failed to release IDC lock, retrying=%d\n", retry);
4925                        goto retry_unlock2;
4926                }
4927        }
4928
4929        return;
4930#endif
4931}
4932
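/* The IDC DRV-PRESENCE register carries one bit per function; the helpers
 * below set or clear this function's bit (ha->portnum), with the locked
 * variants taking the IDC lock around the __-prefixed ones.
 */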
4933int
4934__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
4935{
4936        int rval = QLA_SUCCESS;
4937        struct qla_hw_data *ha = vha->hw;
4938        uint32_t drv_presence;
4939
4940        rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4941        if (rval == QLA_SUCCESS) {
4942                drv_presence |= (1 << ha->portnum);
4943                rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4944                    drv_presence);
4945        }
4946
4947        return rval;
4948}
4949
4950int
4951qla83xx_set_drv_presence(scsi_qla_host_t *vha)
4952{
4953        int rval = QLA_SUCCESS;
4954
4955        qla83xx_idc_lock(vha, 0);
4956        rval = __qla83xx_set_drv_presence(vha);
4957        qla83xx_idc_unlock(vha, 0);
4958
4959        return rval;
4960}
4961
4962int
4963__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
4964{
4965        int rval = QLA_SUCCESS;
4966        struct qla_hw_data *ha = vha->hw;
4967        uint32_t drv_presence;
4968
4969        rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4970        if (rval == QLA_SUCCESS) {
4971                drv_presence &= ~(1 << ha->portnum);
4972                rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4973                    drv_presence);
4974        }
4975
4976        return rval;
4977}
4978
4979int
4980qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
4981{
4982        int rval = QLA_SUCCESS;
4983
4984        qla83xx_idc_lock(vha, 0);
4985        rval = __qla83xx_clear_drv_presence(vha);
4986        qla83xx_idc_unlock(vha, 0);
4987
4988        return rval;
4989}
4990
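/* Handle the NEED_RESET IDC state: wait up to fcoe_reset_timeout for every
 * function in DRV-PRESENCE to ack, drop any function that failed to ack
 * from DRV-PRESENCE, then move the device state to COLD.
 * Called with the IDC lock held.
 */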
4991static void
4992qla83xx_need_reset_handler(scsi_qla_host_t *vha)
4993{
4994        struct qla_hw_data *ha = vha->hw;
4995        uint32_t drv_ack, drv_presence;
4996        unsigned long ack_timeout;
4997
4998        /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
4999        ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
5000        while (1) {
5001                qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5002                qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5003                if ((drv_ack & drv_presence) == drv_presence)
5004                        break;
5005
5006                if (time_after_eq(jiffies, ack_timeout)) {
5007                        ql_log(ql_log_warn, vha, 0xb067,
5008                            "RESET ACK TIMEOUT! drv_presence=0x%x "
5009                            "drv_ack=0x%x\n", drv_presence, drv_ack);
5010                        /*
5011                         * The function(s) which did not ack in time are forced
5012                         * to withdraw any further participation in the IDC
5013                         * reset.
5014                         */
5015                        if (drv_ack != drv_presence)
5016                                qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
5017                                    drv_ack);
5018                        break;
5019                }
5020
5021                qla83xx_idc_unlock(vha, 0);
5022                msleep(1000);
5023                qla83xx_idc_lock(vha, 0);
5024        }
5025
5026        qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
5027        ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
5028}
5029
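/* Bring the NIC core back up as reset owner: mark the device INITIALIZING,
 * clear the graceful-reset bit in IDC-control, restart the NIC firmware
 * and set the device state to READY (or FAILED on error).
 */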
5030static int
5031qla83xx_device_bootstrap(scsi_qla_host_t *vha)
5032{
5033        int rval = QLA_SUCCESS;
5034        uint32_t idc_control;
5035
5036        qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
5037        ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
5038
5039        /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
5040        __qla83xx_get_idc_control(vha, &idc_control);
5041        idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
5042        __qla83xx_set_idc_control(vha, idc_control);
5043
5044        qla83xx_idc_unlock(vha, 0);
5045        rval = qla83xx_restart_nic_firmware(vha);
5046        qla83xx_idc_lock(vha, 0);
5047
5048        if (rval != QLA_SUCCESS) {
5049                ql_log(ql_log_fatal, vha, 0xb06a,
5050                    "Failed to restart NIC f/w.\n");
5051                qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
5052                ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
5053        } else {
5054                ql_dbg(ql_dbg_p3p, vha, 0xb06c,
5055                    "Success in restarting nic f/w.\n");
5056                qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
5057                ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
5058        }
5059
5060        return rval;
5061}
5062
5063/* Assumes idc_lock always held on entry */
5064int
5065qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
5066{
5067        struct qla_hw_data *ha = base_vha->hw;
5068        int rval = QLA_SUCCESS;
5069        unsigned long dev_init_timeout;
5070        uint32_t dev_state;
5071
5072        /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
5073        dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
5074
5075        while (1) {
5076
5077                if (time_after_eq(jiffies, dev_init_timeout)) {
5078                        ql_log(ql_log_warn, base_vha, 0xb06e,
5079                            "Initialization TIMEOUT!\n");
5080                        /* Init timeout. Disable further NIC Core
5081                         * communication.
5082                         */
5083                        qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
5084                                QLA8XXX_DEV_FAILED);
5085                        ql_log(ql_log_info, base_vha, 0xb06f,
5086                            "HW State: FAILED.\n");
5087                }
5088
5089                qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5090                switch (dev_state) {
5091                case QLA8XXX_DEV_READY:
5092                        if (ha->flags.nic_core_reset_owner)
5093                                qla83xx_idc_audit(base_vha,
5094                                    IDC_AUDIT_COMPLETION);
5095                        ha->flags.nic_core_reset_owner = 0;
5096                        ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
5097                            "Reset_owner reset by 0x%x.\n",
5098                            ha->portnum);
5099                        goto exit;
5100                case QLA8XXX_DEV_COLD:
5101                        if (ha->flags.nic_core_reset_owner)
5102                                rval = qla83xx_device_bootstrap(base_vha);
5103                        else {
5104                                /* Wait for AEN to change device-state */
5105                                qla83xx_idc_unlock(base_vha, 0);
5106                                msleep(1000);
5107                                qla83xx_idc_lock(base_vha, 0);
5108                        }
5109                        break;
5110                case QLA8XXX_DEV_INITIALIZING:
5111                        /* Wait for AEN to change device-state */
5112                        qla83xx_idc_unlock(base_vha, 0);
5113                        msleep(1000);
5114                        qla83xx_idc_lock(base_vha, 0);
5115                        break;
5116                case QLA8XXX_DEV_NEED_RESET:
5117                        if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
5118                                qla83xx_need_reset_handler(base_vha);
5119                        else {
5120                                /* Wait for AEN to change device-state */
5121                                qla83xx_idc_unlock(base_vha, 0);
5122                                msleep(1000);
5123                                qla83xx_idc_lock(base_vha, 0);
5124                        }
5125                        /* reset timeout value after need reset handler */
5126                        dev_init_timeout = jiffies +
5127                            (ha->fcoe_dev_init_timeout * HZ);
5128                        break;
5129                case QLA8XXX_DEV_NEED_QUIESCENT:
5130                        /* XXX: DEBUG for now */
5131                        qla83xx_idc_unlock(base_vha, 0);
5132                        msleep(1000);
5133                        qla83xx_idc_lock(base_vha, 0);
5134                        break;
5135                case QLA8XXX_DEV_QUIESCENT:
5136                        /* XXX: DEBUG for now */
5137                        if (ha->flags.quiesce_owner)
5138                                goto exit;
5139
5140                        qla83xx_idc_unlock(base_vha, 0);
5141                        msleep(1000);
5142                        qla83xx_idc_lock(base_vha, 0);
5143                        dev_init_timeout = jiffies +
5144                            (ha->fcoe_dev_init_timeout * HZ);
5145                        break;
5146                case QLA8XXX_DEV_FAILED:
5147                        if (ha->flags.nic_core_reset_owner)
5148                                qla83xx_idc_audit(base_vha,
5149                                    IDC_AUDIT_COMPLETION);
5150                        ha->flags.nic_core_reset_owner = 0;
5151                        __qla83xx_clear_drv_presence(base_vha);
5152                        qla83xx_idc_unlock(base_vha, 0);
5153                        qla8xxx_dev_failed_handler(base_vha);
5154                        rval = QLA_FUNCTION_FAILED;
5155                        qla83xx_idc_lock(base_vha, 0);
5156                        goto exit;
5157                case QLA8XXX_BAD_VALUE:
5158                        qla83xx_idc_unlock(base_vha, 0);
5159                        msleep(1000);
5160                        qla83xx_idc_lock(base_vha, 0);
5161                        break;
5162                default:
5163                        ql_log(ql_log_warn, base_vha, 0xb071,
5164                            "Unknown Device State: %x.\n", dev_state);
5165                        qla83xx_idc_unlock(base_vha, 0);
5166                        qla8xxx_dev_failed_handler(base_vha);
5167                        rval = QLA_FUNCTION_FAILED;
5168                        qla83xx_idc_lock(base_vha, 0);
5169                        goto exit;
5170                }
5171        }
5172
5173exit:
5174        return rval;
5175}
5176
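/* Work: Disable the adapter after an unrecoverable PCI error: abort all
 * outstanding commands, remove the SCSI host and release every resource
 * short of freeing qla_hw_data itself.
 */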
5177void
5178qla2x00_disable_board_on_pci_error(struct work_struct *work)
5179{
5180        struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
5181            board_disable);
5182        struct pci_dev *pdev = ha->pdev;
5183        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5184
5185        /* If the UNLOADING flag is already set, let the path that set it
5186         * first continue the unload.
5187         */
5188        if (test_bit(UNLOADING, &base_vha->dpc_flags))
5189                return;
5190
5191        ql_log(ql_log_warn, base_vha, 0x015b,
5192            "Disabling adapter.\n");
5193
5194        set_bit(UNLOADING, &base_vha->dpc_flags);
5195
5196        qla2x00_delete_all_vps(ha, base_vha);
5197
5198        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
5199
5200        qla2x00_dfs_remove(base_vha);
5201
5202        qla84xx_put_chip(base_vha);
5203
5204        if (base_vha->timer_active)
5205                qla2x00_stop_timer(base_vha);
5206
5207        base_vha->flags.online = 0;
5208
5209        qla2x00_destroy_deferred_work(ha);
5210
5211        /*
5212         * Do not try to stop beacon blink as it will issue a mailbox
5213         * command.
5214         */
5215        qla2x00_free_sysfs_attr(base_vha, false);
5216
5217        fc_remove_host(base_vha->host);
5218
5219        scsi_remove_host(base_vha->host);
5220
5221        base_vha->flags.init_done = 0;
5222        qla25xx_delete_queues(base_vha);
5223        qla2x00_free_fcports(base_vha);
5224        qla2x00_free_irqs(base_vha);
5225        qla2x00_mem_free(ha);
5226        qla82xx_md_free(base_vha);
5227        qla2x00_free_queues(ha);
5228
5229        qla2x00_unmap_iobases(ha);
5230
5231        pci_release_selected_regions(ha->pdev, ha->bars);
5232        pci_disable_pcie_error_reporting(pdev);
5233        pci_disable_device(pdev);
5234
5235        /*
5236         * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
5237         */
5238}
5239
5240/**************************************************************************
5241* qla2x00_do_dpc
5242*   This kernel thread is a task that is scheduled by the interrupt handler
5243*   to perform the background processing for interrupts.
5244*
5245* Notes:
5246* This task always runs in the context of a kernel thread.  It
5247* is kicked off by the driver's detect code and starts up
5248* one per adapter. It immediately goes to sleep and waits for
5249* some fibre event.  When either the interrupt handler or
5250* the timer routine detects an event, it sets one of the task
5251* bits and then wakes us up.
5252**************************************************************************/
5253static int
5254qla2x00_do_dpc(void *data)
5255{
5256        scsi_qla_host_t *base_vha;
5257        struct qla_hw_data *ha;
5258        uint32_t online;
5259        struct qla_qpair *qpair;
5260
5261        ha = (struct qla_hw_data *)data;
5262        base_vha = pci_get_drvdata(ha->pdev);
5263
5264        set_user_nice(current, MIN_NICE);
5265
5266        set_current_state(TASK_INTERRUPTIBLE);
5267        while (!kthread_should_stop()) {
5268                ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
5269                    "DPC handler sleeping.\n");
5270
5271                schedule();
5272
5273                if (!base_vha->flags.init_done || ha->flags.mbox_busy)
5274                        goto end_loop;
5275
5276                if (ha->flags.eeh_busy) {
5277                        ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
5278                            "eeh_busy=%d.\n", ha->flags.eeh_busy);
5279                        goto end_loop;
5280                }
5281
5282                ha->dpc_active = 1;
5283
5284                ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
5285                    "DPC handler waking up, dpc_flags=0x%lx.\n",
5286                    base_vha->dpc_flags);
5287
5288                if (test_bit(UNLOADING, &base_vha->dpc_flags))
5289                        break;
5290
5291                qla2x00_do_work(base_vha);
5292
5293                if (IS_P3P_TYPE(ha)) {
5294                        if (IS_QLA8044(ha)) {
5295                                if (test_and_clear_bit(ISP_UNRECOVERABLE,
5296                                        &base_vha->dpc_flags)) {
5297                                        qla8044_idc_lock(ha);
5298                                        qla8044_wr_direct(base_vha,
5299                                                QLA8044_CRB_DEV_STATE_INDEX,
5300                                                QLA8XXX_DEV_FAILED);
5301                                        qla8044_idc_unlock(ha);
5302                                        ql_log(ql_log_info, base_vha, 0x4004,
5303                                                "HW State: FAILED.\n");
5304                                        qla8044_device_state_handler(base_vha);
5305                                        continue;
5306                                }
5307
5308                        } else {
5309                                if (test_and_clear_bit(ISP_UNRECOVERABLE,
5310                                        &base_vha->dpc_flags)) {
5311                                        qla82xx_idc_lock(ha);
5312                                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5313                                                QLA8XXX_DEV_FAILED);
5314                                        qla82xx_idc_unlock(ha);
5315                                        ql_log(ql_log_info, base_vha, 0x0151,
5316                                                "HW State: FAILED.\n");
5317                                        qla82xx_device_state_handler(base_vha);
5318                                        continue;
5319                                }
5320                        }
5321
5322                        if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
5323                                &base_vha->dpc_flags)) {
5324
5325                                ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
5326                                    "FCoE context reset scheduled.\n");
5327                                if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
5328                                        &base_vha->dpc_flags))) {
5329                                        if (qla82xx_fcoe_ctx_reset(base_vha)) {
5330                                                /* FCoE-ctx reset failed.
5331                                                 * Escalate to chip-reset
5332                                                 */
5333                                                set_bit(ISP_ABORT_NEEDED,
5334                                                        &base_vha->dpc_flags);
5335                                        }
5336                                        clear_bit(ABORT_ISP_ACTIVE,
5337                                                &base_vha->dpc_flags);
5338                                }
5339
5340                                ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
5341                                    "FCoE context reset end.\n");
5342                        }
5343                } else if (IS_QLAFX00(ha)) {
5344                        if (test_and_clear_bit(ISP_UNRECOVERABLE,
5345                                &base_vha->dpc_flags)) {
5346                                ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
5347                                    "Firmware Reset Recovery\n");
5348                                if (qlafx00_reset_initialize(base_vha)) {
5349                                        /* Failed. Abort isp later. */
5350                                        if (!test_bit(UNLOADING,
5351                                            &base_vha->dpc_flags)) {
5352                                                set_bit(ISP_UNRECOVERABLE,
5353                                                    &base_vha->dpc_flags);
5354                                                ql_dbg(ql_dbg_dpc, base_vha,
5355                                                    0x4021,
5356                                                    "Reset Recovery Failed\n");
5357                                        }
5358                                }
5359                        }
5360
5361                        if (test_and_clear_bit(FX00_TARGET_SCAN,
5362                                &base_vha->dpc_flags)) {
5363                                ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
5364                                    "ISPFx00 Target Scan scheduled\n");
5365                                if (qlafx00_rescan_isp(base_vha)) {
5366                                        if (!test_bit(UNLOADING,
5367                                            &base_vha->dpc_flags))
5368                                                set_bit(ISP_UNRECOVERABLE,
5369                                                    &base_vha->dpc_flags);
5370                                        ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
5371                                            "ISPFx00 Target Scan Failed\n");
5372                                }
5373                                ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
5374                                    "ISPFx00 Target Scan End\n");
5375                        }
5376                        if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
5377                                &base_vha->dpc_flags)) {
5378                                ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
5379                                    "ISPFx00 Host Info resend scheduled\n");
5380                                qlafx00_fx_disc(base_vha,
5381                                    &base_vha->hw->mr.fcport,
5382                                    FXDISC_REG_HOST_INFO);
5383                        }
5384                }
5385
5386                if (test_and_clear_bit(ISP_ABORT_NEEDED,
5387                                                &base_vha->dpc_flags)) {
5388
5389                        ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
5390                            "ISP abort scheduled.\n");
5391                        if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
5392                            &base_vha->dpc_flags))) {
5393
5394                                if (ha->isp_ops->abort_isp(base_vha)) {
5395                                        /* failed. retry later */
5396                                        set_bit(ISP_ABORT_NEEDED,
5397                                            &base_vha->dpc_flags);
5398                                }
5399                                clear_bit(ABORT_ISP_ACTIVE,
5400                                                &base_vha->dpc_flags);
5401                        }
5402
5403                        ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
5404                            "ISP abort end.\n");
5405                }
5406
5407                if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
5408                    &base_vha->dpc_flags)) {
5409                        qla2x00_update_fcports(base_vha);
5410                }
5411
5412                if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
5413                        int ret;
5414                        ret = qla2x00_send_change_request(base_vha, 0x3, 0);
5415                        if (ret != QLA_SUCCESS)
5416                                ql_log(ql_log_warn, base_vha, 0x121,
5417                                    "Failed to enable receiving of RSCN "
5418                                    "requests: 0x%x.\n", ret);
5419                        clear_bit(SCR_PENDING, &base_vha->dpc_flags);
5420                }
5421
5422                if (IS_QLAFX00(ha))
5423                        goto loop_resync_check;
5424
5425                if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
5426                        ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
5427                            "Quiescence mode scheduled.\n");
5428                        if (IS_P3P_TYPE(ha)) {
5429                                if (IS_QLA82XX(ha))
5430                                        qla82xx_device_state_handler(base_vha);
5431                                if (IS_QLA8044(ha))
5432                                        qla8044_device_state_handler(base_vha);
5433                                clear_bit(ISP_QUIESCE_NEEDED,
5434                                    &base_vha->dpc_flags);
5435                                if (!ha->flags.quiesce_owner) {
5436                                        qla2x00_perform_loop_resync(base_vha);
5437                                        if (IS_QLA82XX(ha)) {
5438                                                qla82xx_idc_lock(ha);
5439                                                qla82xx_clear_qsnt_ready(
5440                                                    base_vha);
5441                                                qla82xx_idc_unlock(ha);
5442                                        } else if (IS_QLA8044(ha)) {
5443                                                qla8044_idc_lock(ha);
5444                                                qla8044_clear_qsnt_ready(
5445                                                    base_vha);
5446                                                qla8044_idc_unlock(ha);
5447                                        }
5448                                }
5449                        } else {
5450                                clear_bit(ISP_QUIESCE_NEEDED,
5451                                    &base_vha->dpc_flags);
5452                                qla2x00_quiesce_io(base_vha);
5453                        }
5454                        ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
5455                            "Quiescence mode end.\n");
5456                }
5457
5458                if (test_and_clear_bit(RESET_MARKER_NEEDED,
5459                                &base_vha->dpc_flags) &&
5460                    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
5461
5462                        ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
5463                            "Reset marker scheduled.\n");
5464                        qla2x00_rst_aen(base_vha);
5465                        clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
5466                        ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
5467                            "Reset marker end.\n");
5468                }
5469
5470                /* Retry each device up to login retry count */
5471                if ((test_and_clear_bit(RELOGIN_NEEDED,
5472                                                &base_vha->dpc_flags)) &&
5473                    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
5474                    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
5475
5476                        ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
5477                            "Relogin scheduled.\n");
5478                        qla2x00_relogin(base_vha);
5479                        ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
5480                            "Relogin end.\n");
5481                }
5482loop_resync_check:
5483                if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
5484                    &base_vha->dpc_flags)) {
5485
5486                        ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
5487                            "Loop resync scheduled.\n");
5488
5489                        if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
5490                            &base_vha->dpc_flags))) {
5491
5492                                qla2x00_loop_resync(base_vha);
5493
5494                                clear_bit(LOOP_RESYNC_ACTIVE,
5495                                                &base_vha->dpc_flags);
5496                        }
5497
5498                        ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
5499                            "Loop resync end.\n");
5500                }
5501
5502                if (IS_QLAFX00(ha))
5503                        goto intr_on_check;
5504
5505                if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
5506                    atomic_read(&base_vha->loop_state) == LOOP_READY) {
5507                        clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
5508                        qla2xxx_flash_npiv_conf(base_vha);
5509                }
5510
5511intr_on_check:
5512                if (!ha->interrupts_on)
5513                        ha->isp_ops->enable_intrs(ha);
5514
5515                if (test_and_clear_bit(BEACON_BLINK_NEEDED,
5516                                        &base_vha->dpc_flags)) {
5517                        if (ha->beacon_blink_led == 1)
5518                                ha->isp_ops->beacon_blink(base_vha);
5519                }
5520
5521                /* qpair online check */
5522                if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
5523                    &base_vha->dpc_flags)) {
5524                        if (ha->flags.eeh_busy ||
5525                            ha->flags.pci_channel_io_perm_failure)
5526                                online = 0;
5527                        else
5528                                online = 1;
5529
5530                        mutex_lock(&ha->mq_lock);
5531                        list_for_each_entry(qpair, &base_vha->qp_list,
5532                            qp_list_elem)
5533                                qpair->online = online;
5534                        mutex_unlock(&ha->mq_lock);
5535                }
5536
5537                if (!IS_QLAFX00(ha))
5538                        qla2x00_do_dpc_all_vps(base_vha);
5539
5540                ha->dpc_active = 0;
5541end_loop:
5542                set_current_state(TASK_INTERRUPTIBLE);
5543        } /* End of while(1) */
5544        __set_current_state(TASK_RUNNING);
5545
5546        ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
5547            "DPC handler exiting.\n");
5548
5549        /*
5550         * Make sure that nobody tries to wake us up again.
5551         */
5552        ha->dpc_active = 0;
5553
5554        /* Cleanup any residual CTX SRBs. */
5555        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
5556
5557        return 0;
5558}
5559
5560void
5561qla2xxx_wake_dpc(struct scsi_qla_host *vha)
5562{
5563        struct qla_hw_data *ha = vha->hw;
5564        struct task_struct *t = ha->dpc_thread;
5565
5566        if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
5567                wake_up_process(t);
5568}
5569
5570/*
5571*  qla2x00_rst_aen
5572*      Processes asynchronous reset.
5573*
5574* Input:
5575*      vha = adapter block pointer.
5576*/
5577static void
5578qla2x00_rst_aen(scsi_qla_host_t *vha)
5579{
5580        if (vha->flags.online && !vha->flags.reset_active &&
5581            !atomic_read(&vha->loop_down_timer) &&
5582            !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
5583                do {
5584                        clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5585
5586                        /*
5587                         * Issue marker command only when we are going to start
5588                         * the I/O.
5589                         */
5590                        vha->marker_needed = 1;
5591                } while (!atomic_read(&vha->loop_down_timer) &&
5592                    (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
5593        }
5594}
5595
5596/**************************************************************************
5597*   qla2x00_timer
5598*
5599* Description:
5600*   One second timer
5601*
5602* Context: Interrupt
5603***************************************************************************/
5604void
5605qla2x00_timer(scsi_qla_host_t *vha)
5606{
5607        unsigned long   cpu_flags = 0;
5608        int             start_dpc = 0;
5609        int             index;
5610        srb_t           *sp;
5611        uint16_t        w;
5612        struct qla_hw_data *ha = vha->hw;
5613        struct req_que *req;
5614
5615        if (ha->flags.eeh_busy) {
5616                ql_dbg(ql_dbg_timer, vha, 0x6000,
5617                    "EEH = %d, restarting timer.\n",
5618                    ha->flags.eeh_busy);
5619                qla2x00_restart_timer(vha, WATCH_INTERVAL);
5620                return;
5621        }
5622
5623        /*
5624         * Hardware read to raise pending EEH errors during mailbox waits. If
5625         * the read returns -1 then disable the board.
5626         */
5627        if (!pci_channel_offline(ha->pdev)) {
5628                pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
5629                qla2x00_check_reg16_for_disconnect(vha, w);
5630        }
5631
5632        /* Make sure the P3P watchdog is run only for the physical port */
5633        if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
5634                if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
5635                        start_dpc++;
5636                if (IS_QLA82XX(ha))
5637                        qla82xx_watchdog(vha);
5638                else if (IS_QLA8044(ha))
5639                        qla8044_watchdog(vha);
5640        }
5641
5642        if (!vha->vp_idx && IS_QLAFX00(ha))
5643                qlafx00_timer_routine(vha);
5644
5645        /* Loop down handler. */
5646        if (atomic_read(&vha->loop_down_timer) > 0 &&
5647            !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
5648            !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
5649                && vha->flags.online) {
5650
5651                if (atomic_read(&vha->loop_down_timer) ==
5652                    vha->loop_down_abort_time) {
5653
5654                        ql_log(ql_log_info, vha, 0x6008,
5655                            "Loop down - aborting the queues before time expires.\n");
5656
5657                        if (!IS_QLA2100(ha) && vha->link_down_timeout)
5658                                atomic_set(&vha->loop_state, LOOP_DEAD);
5659
5660                        /*
5661                         * Schedule an ISP abort to return any FCP2-device
5662                         * commands.
5663                         */
5664                        /* NPIV - scan physical port only */
5665                        if (!vha->vp_idx) {
5666                                spin_lock_irqsave(&ha->hardware_lock,
5667                                    cpu_flags);
5668                                req = ha->req_q_map[0];
5669                                for (index = 1;
5670                                    index < req->num_outstanding_cmds;
5671                                    index++) {
5672                                        fc_port_t *sfcp;
5673
5674                                        sp = req->outstanding_cmds[index];
5675                                        if (!sp)
5676                                                continue;
5677                                        if (sp->type != SRB_SCSI_CMD)
5678                                                continue;
5679                                        sfcp = sp->fcport;
5680                                        if (!(sfcp->flags & FCF_FCP2_DEVICE))
5681                                                continue;
5682
5683                                        if (IS_QLA82XX(ha))
5684                                                set_bit(FCOE_CTX_RESET_NEEDED,
5685                                                        &vha->dpc_flags);
5686                                        else
5687                                                set_bit(ISP_ABORT_NEEDED,
5688                                                        &vha->dpc_flags);
5689                                        break;
5690                                }
5691                                spin_unlock_irqrestore(&ha->hardware_lock,
5692                                                                cpu_flags);
5693                        }
5694                        start_dpc++;
5695                }
5696
5697                /* if the loop has been down for 4 minutes, reinit adapter */
5698                if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
5699                        if (!(vha->device_flags & DFLG_NO_CABLE)) {
5700                                ql_log(ql_log_warn, vha, 0x6009,
5701                                    "Loop down - aborting ISP.\n");
5702
5703                                if (IS_QLA82XX(ha))
5704                                        set_bit(FCOE_CTX_RESET_NEEDED,
5705                                                &vha->dpc_flags);
5706                                else
5707                                        set_bit(ISP_ABORT_NEEDED,
5708                                                &vha->dpc_flags);
5709                        }
5710                }
5711                ql_dbg(ql_dbg_timer, vha, 0x600a,
5712                    "Loop down - seconds remaining %d.\n",
5713                    atomic_read(&vha->loop_down_timer));
5714        }
5715        /* Check if the beacon LED needs to be blinked (physical host only) */
5716        if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
5717                /* Beacon blink is not scheduled for P3P adapters */
5718                if (!IS_P3P_TYPE(ha)) {
5719                        set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
5720                        start_dpc++;
5721                }
5722        }
5723
5724        /* Process any deferred work. */
5725        if (!list_empty(&vha->work_list))
5726                start_dpc++;
5727
5728        /* Schedule the DPC routine if needed */
5729        if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
5730            test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
5731            test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
5732            start_dpc ||
5733            test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
5734            test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
5735            test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
5736            test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
5737            test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
5738            test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
5739                ql_dbg(ql_dbg_timer, vha, 0x600b,
5740                    "isp_abort_needed=%d loop_resync_needed=%d "
5741                    "fcport_update_needed=%d start_dpc=%d "
5742                    "reset_marker_needed=%d.\n",
5743                    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
5744                    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
5745                    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
5746                    start_dpc,
5747                    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
5748                ql_dbg(ql_dbg_timer, vha, 0x600c,
5749                    "beacon_blink_needed=%d isp_unrecoverable=%d "
5750                    "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
5751                    "relogin_needed=%d.\n",
5752                    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
5753                    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
5754                    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
5755                    test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
5756                    test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
5757                qla2xxx_wake_dpc(vha);
5758        }
5759
5760        qla2x00_restart_timer(vha, WATCH_INTERVAL);
5761}
5762
5763/* Firmware interface routines. */
5764
5765#define FW_BLOBS        11
5766#define FW_ISP21XX      0
5767#define FW_ISP22XX      1
5768#define FW_ISP2300      2
5769#define FW_ISP2322      3
5770#define FW_ISP24XX      4
5771#define FW_ISP25XX      5
5772#define FW_ISP81XX      6
5773#define FW_ISP82XX      7
5774#define FW_ISP2031      8
5775#define FW_ISP8031      9
5776#define FW_ISP27XX      10
5777
5778#define FW_FILE_ISP21XX "ql2100_fw.bin"
5779#define FW_FILE_ISP22XX "ql2200_fw.bin"
5780#define FW_FILE_ISP2300 "ql2300_fw.bin"
5781#define FW_FILE_ISP2322 "ql2322_fw.bin"
5782#define FW_FILE_ISP24XX "ql2400_fw.bin"
5783#define FW_FILE_ISP25XX "ql2500_fw.bin"
5784#define FW_FILE_ISP81XX "ql8100_fw.bin"
5785#define FW_FILE_ISP82XX "ql8200_fw.bin"
5786#define FW_FILE_ISP2031 "ql2600_fw.bin"
5787#define FW_FILE_ISP8031 "ql8300_fw.bin"
5788#define FW_FILE_ISP27XX "ql2700_fw.bin"
5789
5790
5791static DEFINE_MUTEX(qla_fw_lock);
5792
5793static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
5794        { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
5795        { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
5796        { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
5797        { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
5798        { .name = FW_FILE_ISP24XX, },
5799        { .name = FW_FILE_ISP25XX, },
5800        { .name = FW_FILE_ISP81XX, },
5801        { .name = FW_FILE_ISP82XX, },
5802        { .name = FW_FILE_ISP2031, },
5803        { .name = FW_FILE_ISP8031, },
5804        { .name = FW_FILE_ISP27XX, },
5805};
5806
5807struct fw_blob *
5808qla2x00_request_firmware(scsi_qla_host_t *vha)
5809{
5810        struct qla_hw_data *ha = vha->hw;
5811        struct fw_blob *blob;
5812
5813        if (IS_QLA2100(ha)) {
5814                blob = &qla_fw_blobs[FW_ISP21XX];
5815        } else if (IS_QLA2200(ha)) {
5816                blob = &qla_fw_blobs[FW_ISP22XX];
5817        } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
5818                blob = &qla_fw_blobs[FW_ISP2300];
5819        } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
5820                blob = &qla_fw_blobs[FW_ISP2322];
5821        } else if (IS_QLA24XX_TYPE(ha)) {
5822                blob = &qla_fw_blobs[FW_ISP24XX];
5823        } else if (IS_QLA25XX(ha)) {
5824                blob = &qla_fw_blobs[FW_ISP25XX];
5825        } else if (IS_QLA81XX(ha)) {
5826                blob = &qla_fw_blobs[FW_ISP81XX];
5827        } else if (IS_QLA82XX(ha)) {
5828                blob = &qla_fw_blobs[FW_ISP82XX];
5829        } else if (IS_QLA2031(ha)) {
5830                blob = &qla_fw_blobs[FW_ISP2031];
5831        } else if (IS_QLA8031(ha)) {
5832                blob = &qla_fw_blobs[FW_ISP8031];
5833        } else if (IS_QLA27XX(ha)) {
5834                blob = &qla_fw_blobs[FW_ISP27XX];
5835        } else {
5836                return NULL;
5837        }
5838
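            /*
             * Loaded firmware images are cached in qla_fw_blobs[] and
             * released at module exit by qla2x00_release_firmware().
             */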
5839        mutex_lock(&qla_fw_lock);
5840        if (blob->fw)
5841                goto out;
5842
5843        if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
5844                ql_log(ql_log_warn, vha, 0x0063,
5845                    "Failed to load firmware image (%s).\n", blob->name);
5846                blob->fw = NULL;
5847                blob = NULL;
5848                goto out;
5849        }
5850
5851out:
5852        mutex_unlock(&qla_fw_lock);
5853        return blob;
5854}
5855
5856static void
5857qla2x00_release_firmware(void)
5858{
5859        int idx;
5860
5861        mutex_lock(&qla_fw_lock);
5862        for (idx = 0; idx < FW_BLOBS; idx++)
5863                release_firmware(qla_fw_blobs[idx].fw);
5864        mutex_unlock(&qla_fw_lock);
5865}
5866
5867static pci_ers_result_t
5868qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5869{
5870        scsi_qla_host_t *vha = pci_get_drvdata(pdev);
5871        struct qla_hw_data *ha = vha->hw;
5872
5873        ql_dbg(ql_dbg_aer, vha, 0x9000,
5874            "PCI error detected, state %x.\n", state);
5875
5876        switch (state) {
5877        case pci_channel_io_normal:
5878                ha->flags.eeh_busy = 0;
5879                if (ql2xmqsupport) {
5880                        set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
5881                        qla2xxx_wake_dpc(vha);
5882                }
5883                return PCI_ERS_RESULT_CAN_RECOVER;
5884        case pci_channel_io_frozen:
5885                ha->flags.eeh_busy = 1;
5886                /* For ISP82XX complete any pending mailbox cmd */
5887                if (IS_QLA82XX(ha)) {
5888                        ha->flags.isp82xx_fw_hung = 1;
5889                        ql_dbg(ql_dbg_aer, vha, 0x9001, "PCI channel I/O frozen\n");
5890                        qla82xx_clear_pending_mbx(vha);
5891                }
5892                qla2x00_free_irqs(vha);
5893                pci_disable_device(pdev);
5894                /* Return all outstanding I/Os */
5895                qla2x00_abort_all_cmds(vha, DID_RESET << 16);
5896                if (ql2xmqsupport) {
5897                        set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
5898                        qla2xxx_wake_dpc(vha);
5899                }
5900                return PCI_ERS_RESULT_NEED_RESET;
5901        case pci_channel_io_perm_failure:
5902                ha->flags.pci_channel_io_perm_failure = 1;
5903                qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
5904                if (ql2xmqsupport) {
5905                        set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
5906                        qla2xxx_wake_dpc(vha);
5907                }
5908                return PCI_ERS_RESULT_DISCONNECT;
5909        }
5910        return PCI_ERS_RESULT_NEED_RESET;
5911}
5912
5913static pci_ers_result_t
5914qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
5915{
5916        int risc_paused = 0;
5917        uint32_t stat;
5918        unsigned long flags;
5919        scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
5920        struct qla_hw_data *ha = base_vha->hw;
5921        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
5922        struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
5923
5924        if (IS_QLA82XX(ha))
5925                return PCI_ERS_RESULT_RECOVERED;
5926
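            /*
             * Read the host status under the hardware lock to see whether
             * the RISC is paused; a paused RISC indicates a firmware fault,
             * so dump the firmware and request a slot reset.
             */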
5927        spin_lock_irqsave(&ha->hardware_lock, flags);
5928        if (IS_QLA2100(ha) || IS_QLA2200(ha)){
5929                stat = RD_REG_DWORD(&reg->hccr);
5930                if (stat & HCCR_RISC_PAUSE)
5931                        risc_paused = 1;
5932        } else if (IS_QLA23XX(ha)) {
5933                stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
5934                if (stat & HSR_RISC_PAUSED)
5935                        risc_paused = 1;
5936        } else if (IS_FWI2_CAPABLE(ha)) {
5937                stat = RD_REG_DWORD(&reg24->host_status);
5938                if (stat & HSRX_RISC_PAUSED)
5939                        risc_paused = 1;
5940        }
5941        spin_unlock_irqrestore(&ha->hardware_lock, flags);
5942
5943        if (risc_paused) {
5944                ql_log(ql_log_info, base_vha, 0x9003,
5945                    "RISC paused -- mmio_enabled, Dumping firmware.\n");
5946                ha->isp_ops->fw_dump(base_vha, 0);
5947
5948                return PCI_ERS_RESULT_NEED_RESET;
5949        } else
5950                return PCI_ERS_RESULT_RECOVERED;
5951}
5952
5953static uint32_t
5954qla82xx_error_recovery(scsi_qla_host_t *base_vha)
5955{
5956        uint32_t rval = QLA_FUNCTION_FAILED;
5957        uint32_t drv_active = 0;
5958        struct qla_hw_data *ha = base_vha->hw;
5959        int fn;
5960        struct pci_dev *other_pdev = NULL;
5961
5962        ql_dbg(ql_dbg_aer, base_vha, 0x9006,
5963            "Entered %s.\n", __func__);
5964
5965        set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
5966
5967        if (base_vha->flags.online) {
5968                /* Abort all outstanding commands so they can be
5969                 * requeued later. */
5970                qla2x00_abort_isp_cleanup(base_vha);
5971        }
5972
5973
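            /*
             * Determine the reset owner: walk the lower-numbered PCI
             * functions in this slot and stop at the first one that is
             * still enabled.  If none is enabled (fn reaches 0), this
             * function owns the reset and drives IDC recovery below.
             */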
5974        fn = PCI_FUNC(ha->pdev->devfn);
5975        while (fn > 0) {
5976                fn--;
5977                ql_dbg(ql_dbg_aer, base_vha, 0x9007,
5978                    "Finding pci device at function = 0x%x.\n", fn);
5979                other_pdev =
5980                    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
5981                    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
5982                    fn));
5983
5984                if (!other_pdev)
5985                        continue;
5986                if (atomic_read(&other_pdev->enable_cnt)) {
5987                        ql_dbg(ql_dbg_aer, base_vha, 0x9008,
5988                            "Found PCI func available and enabled at 0x%x.\n",
5989                            fn);
5990                        pci_dev_put(other_pdev);
5991                        break;
5992                }
5993                pci_dev_put(other_pdev);
5994        }
5995
5996        if (!fn) {
5997                /* Reset owner */
5998                ql_dbg(ql_dbg_aer, base_vha, 0x9009,
5999                    "This devfn is reset owner = 0x%x.\n",
6000                    ha->pdev->devfn);
6001                qla82xx_idc_lock(ha);
6002
6003                qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6004                    QLA8XXX_DEV_INITIALIZING);
6005
6006                qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
6007                    QLA82XX_IDC_VERSION);
6008
6009                drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
6010                ql_dbg(ql_dbg_aer, base_vha, 0x900a,
6011                    "drv_active = 0x%x.\n", drv_active);
6012
6013                qla82xx_idc_unlock(ha);
6014                /* Reset the firmware only if the device has not already
6015                 * been reset; drv_active is 0 if a reset was already done.
6016                 */
6017                if (drv_active)
6018                        rval = qla82xx_start_firmware(base_vha);
6019                else
6020                        rval = QLA_SUCCESS;
6021                qla82xx_idc_lock(ha);
6022
6023                if (rval != QLA_SUCCESS) {
6024                        ql_log(ql_log_info, base_vha, 0x900b,
6025                            "HW State: FAILED.\n");
6026                        qla82xx_clear_drv_active(ha);
6027                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6028                            QLA8XXX_DEV_FAILED);
6029                } else {
6030                        ql_log(ql_log_info, base_vha, 0x900c,
6031                            "HW State: READY.\n");
6032                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6033                            QLA8XXX_DEV_READY);
6034                        qla82xx_idc_unlock(ha);
6035                        ha->flags.isp82xx_fw_hung = 0;
6036                        rval = qla82xx_restart_isp(base_vha);
6037                        qla82xx_idc_lock(ha);
6038                        /* Clear driver state register */
6039                        qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
6040                        qla82xx_set_drv_active(base_vha);
6041                }
6042                qla82xx_idc_unlock(ha);
6043        } else {
6044                ql_dbg(ql_dbg_aer, base_vha, 0x900d,
6045                    "This devfn is not reset owner = 0x%x.\n",
6046                    ha->pdev->devfn);
6047                if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
6048                    QLA8XXX_DEV_READY)) {
6049                        ha->flags.isp82xx_fw_hung = 0;
6050                        rval = qla82xx_restart_isp(base_vha);
6051                        qla82xx_idc_lock(ha);
6052                        qla82xx_set_drv_active(base_vha);
6053                        qla82xx_idc_unlock(ha);
6054                }
6055        }
6056        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
6057
6058        return rval;
6059}
6060
6061static pci_ers_result_t
6062qla2xxx_pci_slot_reset(struct pci_dev *pdev)
6063{
6064        pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
6065        scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
6066        struct qla_hw_data *ha = base_vha->hw;
6067        struct rsp_que *rsp;
6068        int rc, retries = 10;
6069
6070        ql_dbg(ql_dbg_aer, base_vha, 0x9004,
6071            "Slot Reset.\n");
6072
6073        /* Workaround: the qla2xxx driver accesses hardware early in
6074         * recovery and needs the error state to be pci_channel_io_normal;
6075         * otherwise mailbox commands time out.
6076         */
6077        pdev->error_state = pci_channel_io_normal;
6078
6079        pci_restore_state(pdev);
6080
6081        /* pci_restore_state() clears the saved_state flag of the device,
6082         * so save the restored state again to set the saved_state flag.
6083         */
6084        pci_save_state(pdev);
6085
6086        if (ha->mem_only)
6087                rc = pci_enable_device_mem(pdev);
6088        else
6089                rc = pci_enable_device(pdev);
6090
6091        if (rc) {
6092                ql_log(ql_log_warn, base_vha, 0x9005,
6093                    "Can't re-enable PCI device after reset.\n");
6094                goto exit_slot_reset;
6095        }
6096
6097        rsp = ha->rsp_q_map[0];
6098        if (qla2x00_request_irqs(ha, rsp))
6099                goto exit_slot_reset;
6100
6101        if (ha->isp_ops->pci_config(base_vha))
6102                goto exit_slot_reset;
6103
6104        if (IS_QLA82XX(ha)) {
6105                if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
6106                        ret = PCI_ERS_RESULT_RECOVERED;
6107                        goto exit_slot_reset;
6108                } else
6109                        goto exit_slot_reset;
6110        }
6111
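            /*
             * Give any in-flight mailbox command up to 10 seconds to
             * complete before forcing an ISP abort.
             */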
6112        while (ha->flags.mbox_busy && retries--)
6113                msleep(1000);
6114
6115        set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
6116        if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
6117                ret =  PCI_ERS_RESULT_RECOVERED;
6118        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
6119
6120
6121exit_slot_reset:
6122        ql_dbg(ql_dbg_aer, base_vha, 0x900e,
6123            "slot_reset return %x.\n", ret);
6124
6125        return ret;
6126}
6127
6128static void
6129qla2xxx_pci_resume(struct pci_dev *pdev)
6130{
6131        scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
6132        struct qla_hw_data *ha = base_vha->hw;
6133        int ret;
6134
6135        ql_dbg(ql_dbg_aer, base_vha, 0x900f,
6136            "pci_resume.\n");
6137
6138        ret = qla2x00_wait_for_hba_online(base_vha);
6139        if (ret != QLA_SUCCESS) {
6140                ql_log(ql_log_fatal, base_vha, 0x9002,
6141                    "The device failed to resume I/O from slot/link_reset.\n");
6142        }
6143
6144        pci_cleanup_aer_uncorrect_error_status(pdev);
6145
6146        ha->flags.eeh_busy = 0;
6147}
6148
6149static void
6150qla83xx_disable_laser(scsi_qla_host_t *vha)
6151{
6152        uint32_t reg, data, fn;
6153        struct qla_hw_data *ha = vha->hw;
6154        struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
6155
6156        /* pci func #/port # */
6157        ql_dbg(ql_dbg_init, vha, 0x004b,
6158            "Disabling Laser for hba: %p\n", vha);
6159
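            /*
             * Bits 15:12 of ctrl_status encode the PCI function number;
             * odd functions drive port 1, even functions port 0.
             */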
6160        fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
6161                (BIT_15|BIT_14|BIT_13|BIT_12));
6162
6163        fn = (fn >> 12);
6164
6165        if (fn & 1)
6166                reg = PORT_1_2031;
6167        else
6168                reg = PORT_0_2031;
6169
6170        data = LASER_OFF_2031;
6171
6172        qla83xx_wr_reg(vha, reg, data);
6173}
6174
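    /*
     * Spread blk-mq hardware queues across CPUs according to the PCI
     * device's MSI-X vector affinity.
     */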
6175static int qla2xxx_map_queues(struct Scsi_Host *shost)
6176{
6177        scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
6178
6179        return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
6180}
6181
6182static const struct pci_error_handlers qla2xxx_err_handler = {
6183        .error_detected = qla2xxx_pci_error_detected,
6184        .mmio_enabled = qla2xxx_pci_mmio_enabled,
6185        .slot_reset = qla2xxx_pci_slot_reset,
6186        .resume = qla2xxx_pci_resume,
6187};
6188
6189static struct pci_device_id qla2xxx_pci_tbl[] = {
6190        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
6191        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
6192        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
6193        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
6194        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
6195        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
6196        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
6197        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
6198        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
6199        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
6200        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
6201        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
6202        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
6203        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
6204        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
6205        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
6206        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
6207        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
6208        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
6209        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
6210        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
6211        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
6212        { 0 },
6213};
6214MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
6215
6216static struct pci_driver qla2xxx_pci_driver = {
6217        .name           = QLA2XXX_DRIVER_NAME,
6218        .driver         = {
6219                .owner          = THIS_MODULE,
6220        },
6221        .id_table       = qla2xxx_pci_tbl,
6222        .probe          = qla2x00_probe_one,
6223        .remove         = qla2x00_remove_one,
6224        .shutdown       = qla2x00_shutdown,
6225        .err_handler    = &qla2xxx_err_handler,
6226};
6227
6228static const struct file_operations apidev_fops = {
6229        .owner = THIS_MODULE,
6230        .llseek = noop_llseek,
6231};
6232
6233/**
6234 * qla2x00_module_init - Module initialization.
6235 **/
6236static int __init
6237qla2x00_module_init(void)
6238{
6239        int ret = 0;
6240
6241        /* Allocate cache for SRBs. */
6242        srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
6243            SLAB_HWCACHE_ALIGN, NULL);
6244        if (srb_cachep == NULL) {
6245                ql_log(ql_log_fatal, NULL, 0x0001,
6246                    "Unable to allocate SRB cache...Failing load!.\n");
6247                return -ENOMEM;
6248        }
6249
6250        /* Initialize target kmem_cache and mem_pools */
6251        ret = qlt_init();
6252        if (ret < 0) {
6253                kmem_cache_destroy(srb_cachep);
6254                return ret;
6255        } else if (ret > 0) {
6256                /*
6257                 * If initiator mode is explicitly disabled by qlt_init(),
6258                 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
6259                 * performing scsi_scan_target() during a LOOP UP event.
6260                 */
6261                qla2xxx_transport_functions.disable_target_scan = 1;
6262                qla2xxx_transport_vport_functions.disable_target_scan = 1;
6263        }
6264
6265        /* Derive version string. */
6266        strcpy(qla2x00_version_str, QLA2XXX_VERSION);
6267        if (ql2xextended_error_logging)
6268                strcat(qla2x00_version_str, "-debug");
6269
6270        qla2xxx_transport_template =
6271            fc_attach_transport(&qla2xxx_transport_functions);
6272        if (!qla2xxx_transport_template) {
6273                kmem_cache_destroy(srb_cachep);
6274                ql_log(ql_log_fatal, NULL, 0x0002,
6275                    "fc_attach_transport failed...Failing load!.\n");
6276                qlt_exit();
6277                return -ENODEV;
6278        }
6279
6280        apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
6281        if (apidev_major < 0) {
6282                ql_log(ql_log_fatal, NULL, 0x0003,
6283                    "Unable to register char device %s.\n", QLA2XXX_APIDEV);
6284        }
6285
6286        qla2xxx_transport_vport_template =
6287            fc_attach_transport(&qla2xxx_transport_vport_functions);
6288        if (!qla2xxx_transport_vport_template) {
6289                kmem_cache_destroy(srb_cachep);
6290                qlt_exit();
6291                fc_release_transport(qla2xxx_transport_template);
6292                ql_log(ql_log_fatal, NULL, 0x0004,
6293                    "fc_attach_transport vport failed...Failing load!.\n");
6294                return -ENODEV;
6295        }
6296        ql_log(ql_log_info, NULL, 0x0005,
6297            "QLogic Fibre Channel HBA Driver: %s.\n",
6298            qla2x00_version_str);
6299        ret = pci_register_driver(&qla2xxx_pci_driver);
6300        if (ret) {
6301                kmem_cache_destroy(srb_cachep);
6302                qlt_exit();
6303                fc_release_transport(qla2xxx_transport_template);
6304                fc_release_transport(qla2xxx_transport_vport_template);
6305                ql_log(ql_log_fatal, NULL, 0x0006,
6306                    "pci_register_driver failed...ret=%d Failing load!.\n",
6307                    ret);
6308        }
6309        return ret;
6310}
6311
6312/**
6313 * qla2x00_module_exit - Module cleanup.
6314 **/
6315static void __exit
6316qla2x00_module_exit(void)
6317{
6318        unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
6319        pci_unregister_driver(&qla2xxx_pci_driver);
6320        qla2x00_release_firmware();
6321        kmem_cache_destroy(srb_cachep);
6322        qlt_exit();
6323        if (ctx_cachep)
6324                kmem_cache_destroy(ctx_cachep);
6325        fc_release_transport(qla2xxx_transport_template);
6326        fc_release_transport(qla2xxx_transport_vport_template);
6327}
6328
6329module_init(qla2x00_module_init);
6330module_exit(qla2x00_module_exit);
6331
6332MODULE_AUTHOR("QLogic Corporation");
6333MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
6334MODULE_LICENSE("GPL");
6335MODULE_VERSION(QLA2XXX_VERSION);
6336MODULE_FIRMWARE(FW_FILE_ISP21XX);
6337MODULE_FIRMWARE(FW_FILE_ISP22XX);
6338MODULE_FIRMWARE(FW_FILE_ISP2300);
6339MODULE_FIRMWARE(FW_FILE_ISP2322);
6340MODULE_FIRMWARE(FW_FILE_ISP24XX);
6341MODULE_FIRMWARE(FW_FILE_ISP25XX);
6342