linux/drivers/scsi/qla2xxx/qla_os.c
   1/*
   2 * QLogic Fibre Channel HBA Driver
   3 * Copyright (c)  2003-2008 QLogic Corporation
   4 *
   5 * See LICENSE.qla2xxx for copyright and licensing details.
   6 */
   7#include "qla_def.h"
   8
   9#include <linux/moduleparam.h>
  10#include <linux/vmalloc.h>
  11#include <linux/delay.h>
  12#include <linux/kthread.h>
  13#include <linux/mutex.h>
  14
  15#include <scsi/scsi_tcq.h>
  16#include <scsi/scsicam.h>
  17#include <scsi/scsi_transport.h>
  18#include <scsi/scsi_transport_fc.h>
  19
  20/*
  21 * Driver version
  22 */
  23char qla2x00_version_str[40];
  24
  25/*
  26 * SRB allocation cache
  27 */
  28static struct kmem_cache *srb_cachep;
  29
  30int ql2xlogintimeout = 20;
  31module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
  32MODULE_PARM_DESC(ql2xlogintimeout,
  33                "Login timeout value in seconds.");
  34
  35int qlport_down_retry;
  36module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
  37MODULE_PARM_DESC(qlport_down_retry,
  38                "Maximum number of command retries to a port that returns "
  39                "a PORT-DOWN status.");
  40
  41int ql2xplogiabsentdevice;
  42module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
  43MODULE_PARM_DESC(ql2xplogiabsentdevice,
  44                "Option to enable PLOGI to devices that are not present after "
  45                "a Fabric scan.  This is needed for several broken switches. "
   46                "Default is 0 - no PLOGI. 1 - perform PLOGI.");
  47
  48int ql2xloginretrycount = 0;
  49module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
  50MODULE_PARM_DESC(ql2xloginretrycount,
  51                "Specify an alternate value for the NVRAM login retry count.");
  52
  53int ql2xallocfwdump = 1;
  54module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR);
  55MODULE_PARM_DESC(ql2xallocfwdump,
  56                "Option to enable allocation of memory for a firmware dump "
  57                "during HBA initialization.  Memory allocation requirements "
  58                "vary by ISP type.  Default is 1 - allocate memory.");
  59
  60int ql2xextended_error_logging;
  61module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
  62MODULE_PARM_DESC(ql2xextended_error_logging,
   63                "Option to enable extended error logging. "
  64                "Default is 0 - no logging. 1 - log errors.");
  65
  66static void qla2x00_free_device(scsi_qla_host_t *);
  67
   68int ql2xfdmienable = 1;
   69module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
   70MODULE_PARM_DESC(ql2xfdmienable,
   71                "Enables FDMI registrations. "
   72                "Default is 1 - perform FDMI. 0 - no FDMI.");
  73
  74#define MAX_Q_DEPTH    32
  75static int ql2xmaxqdepth = MAX_Q_DEPTH;
  76module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
  77MODULE_PARM_DESC(ql2xmaxqdepth,
  78                "Maximum queue depth to report for target devices.");
  79
  80int ql2xqfulltracking = 1;
  81module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
  82MODULE_PARM_DESC(ql2xqfulltracking,
  83                "Controls whether the driver tracks queue full status "
  84                "returns and dynamically adjusts a scsi device's queue "
  85                "depth.  Default is 1, perform tracking.  Set to 0 to "
  86                "disable dynamic tracking and adjustment of queue depth.");
  87
  88int ql2xqfullrampup = 120;
  89module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
  90MODULE_PARM_DESC(ql2xqfullrampup,
  91                "Number of seconds to wait to begin to ramp-up the queue "
  92                "depth for a device after a queue-full condition has been "
  93                "detected.  Default is 120 seconds.");
  94
   95int ql2xiidmaenable = 1;
  96module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
  97MODULE_PARM_DESC(ql2xiidmaenable,
   98                "Enables iIDMA settings. "
  99                "Default is 1 - perform iIDMA. 0 - no iIDMA.");
 100
 101int ql2xmaxqueues = 1;
 102module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
 103MODULE_PARM_DESC(ql2xmaxqueues,
  104                "Enables MQ settings. "
  105                "Default is 1 for single queue. "
  106                "Set it to the number of queues in MQ mode.");
 107
 108int ql2xmultique_tag;
 109module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
 110MODULE_PARM_DESC(ql2xmultique_tag,
  111                "Enables CPU affinity settings for the driver. "
 112                "Default is 0 for no affinity of request and response IO. "
 113                "Set it to 1 to turn on the cpu affinity.");
 114
 115int ql2xfwloadbin;
 116module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
 117MODULE_PARM_DESC(ql2xfwloadbin,
 118                "Option to specify location from which to load ISP firmware:\n"
 119                " 2 -- load firmware via the request_firmware() (hotplug)\n"
 120                "      interface.\n"
 121                " 1 -- load firmware from flash.\n"
 122                " 0 -- use default semantics.\n");
 123
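/*
 * Illustrative note (not part of the original driver sources): the options
 * above are ordinary module parameters, so they can be set at load time,
 * for example
 *
 *	modprobe qla2xxx ql2xlogintimeout=30 ql2xextended_error_logging=1
 *
 * Parameters declared with S_IWUSR should additionally be writable at
 * runtime through /sys/module/qla2xxx/parameters/<name>.
 */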
 124/*
 125 * SCSI host template entry points
 126 */
 127static int qla2xxx_slave_configure(struct scsi_device * device);
 128static int qla2xxx_slave_alloc(struct scsi_device *);
 129static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
 130static void qla2xxx_scan_start(struct Scsi_Host *);
 131static void qla2xxx_slave_destroy(struct scsi_device *);
 132static int qla2xxx_queuecommand(struct scsi_cmnd *cmd,
 133                void (*fn)(struct scsi_cmnd *));
 134static int qla2xxx_eh_abort(struct scsi_cmnd *);
 135static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
 136static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
 137static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
 138static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 139
 140static int qla2x00_change_queue_depth(struct scsi_device *, int);
 141static int qla2x00_change_queue_type(struct scsi_device *, int);
 142
 143struct scsi_host_template qla2xxx_driver_template = {
 144        .module                 = THIS_MODULE,
 145        .name                   = QLA2XXX_DRIVER_NAME,
 146        .queuecommand           = qla2xxx_queuecommand,
 147
 148        .eh_abort_handler       = qla2xxx_eh_abort,
 149        .eh_device_reset_handler = qla2xxx_eh_device_reset,
 150        .eh_target_reset_handler = qla2xxx_eh_target_reset,
 151        .eh_bus_reset_handler   = qla2xxx_eh_bus_reset,
 152        .eh_host_reset_handler  = qla2xxx_eh_host_reset,
 153
 154        .slave_configure        = qla2xxx_slave_configure,
 155
 156        .slave_alloc            = qla2xxx_slave_alloc,
 157        .slave_destroy          = qla2xxx_slave_destroy,
 158        .scan_finished          = qla2xxx_scan_finished,
 159        .scan_start             = qla2xxx_scan_start,
 160        .change_queue_depth     = qla2x00_change_queue_depth,
 161        .change_queue_type      = qla2x00_change_queue_type,
 162        .this_id                = -1,
 163        .cmd_per_lun            = 3,
 164        .use_clustering         = ENABLE_CLUSTERING,
 165        .sg_tablesize           = SG_ALL,
 166
 167        .max_sectors            = 0xFFFF,
 168        .shost_attrs            = qla2x00_host_attrs,
 169};
 170
 171static struct scsi_transport_template *qla2xxx_transport_template = NULL;
 172struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
 173
 174/* TODO Convert to inlines
 175 *
 176 * Timer routines
 177 */
 178
 179__inline__ void
 180qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
 181{
 182        init_timer(&vha->timer);
 183        vha->timer.expires = jiffies + interval * HZ;
 184        vha->timer.data = (unsigned long)vha;
 185        vha->timer.function = (void (*)(unsigned long))func;
 186        add_timer(&vha->timer);
 187        vha->timer_active = 1;
 188}
 189
 190static inline void
 191qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
 192{
 193        mod_timer(&vha->timer, jiffies + interval * HZ);
 194}
 195
 196static __inline__ void
 197qla2x00_stop_timer(scsi_qla_host_t *vha)
 198{
 199        del_timer_sync(&vha->timer);
 200        vha->timer_active = 0;
 201}
 202
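/*
 * Illustrative sketch (assumptions, not original code): the timer helpers
 * above are normally armed once per host at probe time and torn down when
 * the host is freed, roughly along the lines of
 *
 *	qla2x00_start_timer(base_vha, (void *)qla2x00_timer, WATCH_INTERVAL);
 *	...
 *	qla2x00_stop_timer(base_vha);
 *
 * where qla2x00_timer() and WATCH_INTERVAL are assumed to be provided by
 * qla_def.h and the DPC/timer code elsewhere in the driver.
 */
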
 203static int qla2x00_do_dpc(void *data);
 204
 205static void qla2x00_rst_aen(scsi_qla_host_t *);
 206
 207static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
 208        struct req_que **, struct rsp_que **);
 209static void qla2x00_mem_free(struct qla_hw_data *);
 210static void qla2x00_sp_free_dma(srb_t *);
 211
 212/* -------------------------------------------------------------------------- */
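/* Allocate the per-HBA request/response queue pointer arrays; returns 1 on
 * success and -ENOMEM on failure (note the asymmetric return convention). */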
 213static int qla2x00_alloc_queues(struct qla_hw_data *ha)
 214{
 215        ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
 216                                GFP_KERNEL);
 217        if (!ha->req_q_map) {
 218                qla_printk(KERN_WARNING, ha,
 219                        "Unable to allocate memory for request queue ptrs\n");
 220                goto fail_req_map;
 221        }
 222
 223        ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
 224                                GFP_KERNEL);
 225        if (!ha->rsp_q_map) {
 226                qla_printk(KERN_WARNING, ha,
 227                        "Unable to allocate memory for response queue ptrs\n");
 228                goto fail_rsp_map;
 229        }
 230        set_bit(0, ha->rsp_qid_map);
 231        set_bit(0, ha->req_qid_map);
 232        return 1;
 233
 234fail_rsp_map:
 235        kfree(ha->req_q_map);
 236        ha->req_q_map = NULL;
 237fail_req_map:
 238        return -ENOMEM;
 239}
 240
 241static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
 242{
 243        if (req && req->ring)
 244                dma_free_coherent(&ha->pdev->dev,
 245                (req->length + 1) * sizeof(request_t),
 246                req->ring, req->dma);
 247
 248        kfree(req);
 249        req = NULL;
 250}
 251
 252static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
 253{
 254        if (rsp && rsp->ring)
 255                dma_free_coherent(&ha->pdev->dev,
 256                (rsp->length + 1) * sizeof(response_t),
 257                rsp->ring, rsp->dma);
 258
 259        kfree(rsp);
 260        rsp = NULL;
 261}
 262
 263static void qla2x00_free_queues(struct qla_hw_data *ha)
 264{
 265        struct req_que *req;
 266        struct rsp_que *rsp;
 267        int cnt;
 268
 269        for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
 270                req = ha->req_q_map[cnt];
 271                qla2x00_free_req_que(ha, req);
 272        }
 273        kfree(ha->req_q_map);
 274        ha->req_q_map = NULL;
 275
 276        for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
 277                rsp = ha->rsp_q_map[cnt];
 278                qla2x00_free_rsp_que(ha, rsp);
 279        }
 280        kfree(ha->rsp_q_map);
 281        ha->rsp_q_map = NULL;
 282}
 283
 284static int qla25xx_setup_mode(struct scsi_qla_host *vha)
 285{
 286        uint16_t options = 0;
 287        int ques, req, ret;
 288        struct qla_hw_data *ha = vha->hw;
 289
 290        if (!(ha->fw_attributes & BIT_6)) {
 291                qla_printk(KERN_INFO, ha,
 292                        "Firmware is not multi-queue capable\n");
 293                goto fail;
 294        }
 295        if (ql2xmultique_tag) {
 296                /* create a request queue for IO */
 297                options |= BIT_7;
 298                req = qla25xx_create_req_que(ha, options, 0, 0, -1,
 299                        QLA_DEFAULT_QUE_QOS);
 300                if (!req) {
 301                        qla_printk(KERN_WARNING, ha,
 302                                "Can't create request queue\n");
 303                        goto fail;
 304                }
 305                ha->wq = create_workqueue("qla2xxx_wq");
 306                vha->req = ha->req_q_map[req];
 307                options |= BIT_1;
 308                for (ques = 1; ques < ha->max_rsp_queues; ques++) {
 309                        ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
 310                        if (!ret) {
 311                                qla_printk(KERN_WARNING, ha,
 312                                        "Response Queue create failed\n");
 313                                goto fail2;
 314                        }
 315                }
 316                ha->flags.cpu_affinity_enabled = 1;
 317
 318                DEBUG2(qla_printk(KERN_INFO, ha,
 319                        "CPU affinity mode enabled, no. of response"
 320                        " queues:%d, no. of request queues:%d\n",
 321                        ha->max_rsp_queues, ha->max_req_queues));
 322        }
 323        return 0;
 324fail2:
 325        qla25xx_delete_queues(vha);
 326        destroy_workqueue(ha->wq);
 327        ha->wq = NULL;
 328fail:
 329        ha->mqenable = 0;
 330        kfree(ha->req_q_map);
 331        kfree(ha->rsp_q_map);
 332        ha->max_req_queues = ha->max_rsp_queues = 1;
 333        return 1;
 334}
 335
 336static char *
 337qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
 338{
 339        struct qla_hw_data *ha = vha->hw;
 340        static char *pci_bus_modes[] = {
 341                "33", "66", "100", "133",
 342        };
 343        uint16_t pci_bus;
 344
 345        strcpy(str, "PCI");
 346        pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
 347        if (pci_bus) {
 348                strcat(str, "-X (");
 349                strcat(str, pci_bus_modes[pci_bus]);
 350        } else {
 351                pci_bus = (ha->pci_attr & BIT_8) >> 8;
 352                strcat(str, " (");
 353                strcat(str, pci_bus_modes[pci_bus]);
 354        }
 355        strcat(str, " MHz)");
 356
 357        return (str);
 358}
 359
 360static char *
 361qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
 362{
 363        static char *pci_bus_modes[] = { "33", "66", "100", "133", };
 364        struct qla_hw_data *ha = vha->hw;
 365        uint32_t pci_bus;
 366        int pcie_reg;
 367
 368        pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
 369        if (pcie_reg) {
 370                char lwstr[6];
 371                uint16_t pcie_lstat, lspeed, lwidth;
 372
 373                pcie_reg += 0x12;
 374                pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
 375                lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
 376                lwidth = (pcie_lstat &
 377                    (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
 378
 379                strcpy(str, "PCIe (");
 380                if (lspeed == 1)
 381                        strcat(str, "2.5GT/s ");
 382                else if (lspeed == 2)
 383                        strcat(str, "5.0GT/s ");
 384                else
 385                        strcat(str, "<unknown> ");
 386                snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
 387                strcat(str, lwstr);
 388
 389                return str;
 390        }
 391
 392        strcpy(str, "PCI");
 393        pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
 394        if (pci_bus == 0 || pci_bus == 8) {
 395                strcat(str, " (");
 396                strcat(str, pci_bus_modes[pci_bus >> 3]);
 397        } else {
 398                strcat(str, "-X ");
 399                if (pci_bus & BIT_2)
 400                        strcat(str, "Mode 2");
 401                else
 402                        strcat(str, "Mode 1");
 403                strcat(str, " (");
 404                strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
 405        }
 406        strcat(str, " MHz)");
 407
 408        return str;
 409}
 410
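/*
 * Note on the raw 0x12 offset used above (illustrative only; the constant
 * names below come from <linux/pci_regs.h> in recent kernels and are not
 * referenced by the original code): 0x12 past the PCIe capability base is
 * the Link Status register (PCI_EXP_LNKSTA), whose bits [3:0] hold the
 * current link speed and bits [9:4] the negotiated link width.  With
 * pcie_reg still holding the capability base, an equivalent form would be:
 *
 *	pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &pcie_lstat);
 *	lspeed = pcie_lstat & PCI_EXP_LNKSTA_CLS;
 *	lwidth = (pcie_lstat & PCI_EXP_LNKSTA_NLW) >> 4;
 */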
 411static char *
 412qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
 413{
 414        char un_str[10];
 415        struct qla_hw_data *ha = vha->hw;
 416
 417        sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
 418            ha->fw_minor_version,
 419            ha->fw_subminor_version);
 420
 421        if (ha->fw_attributes & BIT_9) {
 422                strcat(str, "FLX");
 423                return (str);
 424        }
 425
 426        switch (ha->fw_attributes & 0xFF) {
 427        case 0x7:
 428                strcat(str, "EF");
 429                break;
 430        case 0x17:
 431                strcat(str, "TP");
 432                break;
 433        case 0x37:
 434                strcat(str, "IP");
 435                break;
 436        case 0x77:
 437                strcat(str, "VI");
 438                break;
 439        default:
 440                sprintf(un_str, "(%x)", ha->fw_attributes);
 441                strcat(str, un_str);
 442                break;
 443        }
 444        if (ha->fw_attributes & 0x100)
 445                strcat(str, "X");
 446
 447        return (str);
 448}
 449
 450static char *
 451qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
 452{
 453        struct qla_hw_data *ha = vha->hw;
 454
 455        sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
 456            ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
 457        return str;
 458}
 459
 460static inline srb_t *
 461qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
 462    struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 463{
 464        srb_t *sp;
 465        struct qla_hw_data *ha = vha->hw;
 466
 467        sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
 468        if (!sp)
 469                return sp;
 470
 471        sp->fcport = fcport;
 472        sp->cmd = cmd;
 473        sp->flags = 0;
 474        CMD_SP(cmd) = (void *)sp;
 475        cmd->scsi_done = done;
 476        sp->ctx = NULL;
 477
 478        return sp;
 479}
 480
 481static int
 482qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 483{
 484        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 485        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 486        struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
 487        struct qla_hw_data *ha = vha->hw;
 488        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 489        srb_t *sp;
 490        int rval;
 491
 492        if (unlikely(pci_channel_offline(ha->pdev))) {
 493                if (ha->pdev->error_state == pci_channel_io_frozen)
 494                        cmd->result = DID_REQUEUE << 16;
 495                else
 496                        cmd->result = DID_NO_CONNECT << 16;
 497                goto qc24_fail_command;
 498        }
 499
 500        rval = fc_remote_port_chkready(rport);
 501        if (rval) {
 502                cmd->result = rval;
 503                goto qc24_fail_command;
 504        }
 505
 506        /* Close window on fcport/rport state-transitioning. */
 507        if (fcport->drport)
 508                goto qc24_target_busy;
 509
 510        if (atomic_read(&fcport->state) != FCS_ONLINE) {
 511                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
 512                    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 513                        cmd->result = DID_NO_CONNECT << 16;
 514                        goto qc24_fail_command;
 515                }
 516                goto qc24_target_busy;
 517        }
 518
 519        spin_unlock_irq(vha->host->host_lock);
 520
 521        sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
 522        if (!sp)
 523                goto qc24_host_busy_lock;
 524
 525        rval = ha->isp_ops->start_scsi(sp);
 526        if (rval != QLA_SUCCESS)
 527                goto qc24_host_busy_free_sp;
 528
 529        spin_lock_irq(vha->host->host_lock);
 530
 531        return 0;
 532
 533qc24_host_busy_free_sp:
 534        qla2x00_sp_free_dma(sp);
 535        mempool_free(sp, ha->srb_mempool);
 536
 537qc24_host_busy_lock:
 538        spin_lock_irq(vha->host->host_lock);
 539        return SCSI_MLQUEUE_HOST_BUSY;
 540
 541qc24_target_busy:
 542        return SCSI_MLQUEUE_TARGET_BUSY;
 543
 544qc24_fail_command:
 545        done(cmd);
 546
 547        return 0;
 548}
 549
 550
 551/*
 552 * qla2x00_eh_wait_on_command
  553 *    Waits, up to a maximum time, for the command to be returned
  554 *    by the firmware.
 555 *
 556 * Input:
 557 *    cmd = Scsi Command to wait on.
 558 *
 559 * Return:
 560 *    Not Found : 0
 561 *    Found : 1
 562 */
 563static int
 564qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
 565{
 566#define ABORT_POLLING_PERIOD    1000
 567#define ABORT_WAIT_ITER         ((10 * 1000) / (ABORT_POLLING_PERIOD))
 568        unsigned long wait_iter = ABORT_WAIT_ITER;
 569        int ret = QLA_SUCCESS;
 570
 571        while (CMD_SP(cmd) && wait_iter--) {
 572                msleep(ABORT_POLLING_PERIOD);
 573        }
 574        if (CMD_SP(cmd))
 575                ret = QLA_FUNCTION_FAILED;
 576
 577        return ret;
 578}
 579
 580/*
 581 * qla2x00_wait_for_hba_online
  582 *    Wait until the HBA comes online (the ISP abort completes within
  583 *    MAX_RETRIES_OF_ISP_ABORT retries) or until the HBA is finally
  584 *    disabled, i.e. marked offline.
 585 *
 586 * Input:
 587 *     ha - pointer to host adapter structure
 588 *
 589 * Note:
  590 *    This routine sleeps (context switches); release any spinlocks
  591 *    (if held) before calling it.
 592 *
 593 * Return:
 594 *    Success (Adapter is online) : 0
 595 *    Failed  (Adapter is offline/disabled) : 1
 596 */
 597int
 598qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
 599{
 600        int             return_status;
 601        unsigned long   wait_online;
 602        struct qla_hw_data *ha = vha->hw;
 603        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 604
 605        wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
 606        while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
 607            test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
 608            test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
 609            ha->dpc_active) && time_before(jiffies, wait_online)) {
 610
 611                msleep(1000);
 612        }
 613        if (base_vha->flags.online)
 614                return_status = QLA_SUCCESS;
 615        else
 616                return_status = QLA_FUNCTION_FAILED;
 617
 618        return (return_status);
 619}
 620
 621int
 622qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
 623{
 624        int             return_status;
 625        unsigned long   wait_reset;
 626        struct qla_hw_data *ha = vha->hw;
 627        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 628
 629        wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
 630        while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
 631            test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
 632            test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
 633            ha->dpc_active) && time_before(jiffies, wait_reset)) {
 634
 635                msleep(1000);
 636
 637                if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
 638                    ha->flags.chip_reset_done)
 639                        break;
 640        }
 641        if (ha->flags.chip_reset_done)
 642                return_status = QLA_SUCCESS;
 643        else
 644                return_status = QLA_FUNCTION_FAILED;
 645
 646        return return_status;
 647}
 648
 649/*
 650 * qla2x00_wait_for_loop_ready
  651 *    Wait up to MAX_LOOP_TIMEOUT (5 min) for the loop
  652 *    to reach the LOOP_READY state.
 653 * Input:
 654 *     ha - pointer to host adapter structure
 655 *
 656 * Note:
  657 *    This routine sleeps (context switches); release any spinlocks
  658 *    (if held) before calling it.
 659 *
 660 *
 661 * Return:
 662 *    Success (LOOP_READY) : 0
 663 *    Failed  (LOOP_NOT_READY) : 1
 664 */
 665static inline int
 666qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
 667{
 668        int      return_status = QLA_SUCCESS;
  669        unsigned long loop_timeout;
 670        struct qla_hw_data *ha = vha->hw;
 671        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 672
 673        /* wait for 5 min at the max for loop to be ready */
 674        loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
 675
 676        while ((!atomic_read(&base_vha->loop_down_timer) &&
 677            atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
 678            atomic_read(&base_vha->loop_state) != LOOP_READY) {
 679                if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 680                        return_status = QLA_FUNCTION_FAILED;
 681                        break;
 682                }
 683                msleep(1000);
 684                if (time_after_eq(jiffies, loop_timeout)) {
 685                        return_status = QLA_FUNCTION_FAILED;
 686                        break;
 687                }
 688        }
 689        return (return_status);
 690}
 691
 692void
 693qla2x00_abort_fcport_cmds(fc_port_t *fcport)
 694{
 695        int cnt;
 696        unsigned long flags;
 697        srb_t *sp;
 698        scsi_qla_host_t *vha = fcport->vha;
 699        struct qla_hw_data *ha = vha->hw;
 700        struct req_que *req;
 701
 702        spin_lock_irqsave(&ha->hardware_lock, flags);
 703        req = vha->req;
 704        for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
 705                sp = req->outstanding_cmds[cnt];
 706                if (!sp)
 707                        continue;
 708                if (sp->fcport != fcport)
 709                        continue;
 710                if (sp->ctx)
 711                        continue;
 712
 713                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 714                if (ha->isp_ops->abort_command(sp)) {
 715                        DEBUG2(qla_printk(KERN_WARNING, ha,
 716                        "Abort failed --  %lx\n",
 717                        sp->cmd->serial_number));
 718                } else {
 719                        if (qla2x00_eh_wait_on_command(sp->cmd) !=
 720                                QLA_SUCCESS)
 721                                DEBUG2(qla_printk(KERN_WARNING, ha,
 722                                "Abort failed while waiting --  %lx\n",
 723                                sp->cmd->serial_number));
 724                }
 725                spin_lock_irqsave(&ha->hardware_lock, flags);
 726        }
 727        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 728}
 729
 730static void
 731qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
 732{
 733        struct Scsi_Host *shost = cmnd->device->host;
 734        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 735        unsigned long flags;
 736
 737        spin_lock_irqsave(shost->host_lock, flags);
 738        while (rport->port_state == FC_PORTSTATE_BLOCKED) {
 739                spin_unlock_irqrestore(shost->host_lock, flags);
 740                msleep(1000);
 741                spin_lock_irqsave(shost->host_lock, flags);
 742        }
 743        spin_unlock_irqrestore(shost->host_lock, flags);
 744        return;
 745}
 746
 747/**************************************************************************
 748* qla2xxx_eh_abort
 749*
 750* Description:
 751*    The abort function will abort the specified command.
 752*
 753* Input:
 754*    cmd = Linux SCSI command packet to be aborted.
 755*
 756* Returns:
 757*    Either SUCCESS or FAILED.
 758*
 759* Note:
 760*    Only return FAILED if command not returned by firmware.
 761**************************************************************************/
 762static int
 763qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 764{
 765        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 766        srb_t *sp;
 767        int ret, i;
 768        unsigned int id, lun;
 769        unsigned long serial;
 770        unsigned long flags;
 771        int wait = 0;
 772        struct qla_hw_data *ha = vha->hw;
 773        struct req_que *req = vha->req;
 774        srb_t *spt;
 775
 776        qla2x00_block_error_handler(cmd);
 777
 778        if (!CMD_SP(cmd))
 779                return SUCCESS;
 780
 781        ret = SUCCESS;
 782
 783        id = cmd->device->id;
 784        lun = cmd->device->lun;
 785        serial = cmd->serial_number;
 786        spt = (srb_t *) CMD_SP(cmd);
 787        if (!spt)
 788                return SUCCESS;
 789
  790        /* Check active list for the command. */
 791        spin_lock_irqsave(&ha->hardware_lock, flags);
 792        for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
 793                sp = req->outstanding_cmds[i];
 794
 795                if (sp == NULL)
 796                        continue;
 797                if (sp->ctx)
 798                        continue;
 799                if (sp->cmd != cmd)
 800                        continue;
 801
 802                DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
 803                " pid=%ld.\n", __func__, vha->host_no, sp, serial));
 804
 805                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 806                if (ha->isp_ops->abort_command(sp)) {
 807                        DEBUG2(printk("%s(%ld): abort_command "
 808                        "mbx failed.\n", __func__, vha->host_no));
 809                        ret = FAILED;
 810                } else {
 811                        DEBUG3(printk("%s(%ld): abort_command "
 812                        "mbx success.\n", __func__, vha->host_no));
 813                        wait = 1;
 814                }
 815                spin_lock_irqsave(&ha->hardware_lock, flags);
 816                break;
 817        }
 818        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 819
 820        /* Wait for the command to be returned. */
 821        if (wait) {
 822                if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
 823                        qla_printk(KERN_ERR, ha,
 824                            "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
 825                            "%x.\n", vha->host_no, id, lun, serial, ret);
 826                        ret = FAILED;
 827                }
 828        }
 829
 830        qla_printk(KERN_INFO, ha,
 831            "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
 832            vha->host_no, id, lun, wait, serial, ret);
 833
 834        return ret;
 835}
 836
 837enum nexus_wait_type {
 838        WAIT_HOST = 0,
 839        WAIT_TARGET,
 840        WAIT_LUN,
 841};
 842
 843static int
 844qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
 845        unsigned int l, srb_t *sp, enum nexus_wait_type type)
 846{
 847        int cnt, match, status;
 848        unsigned long flags;
 849        struct qla_hw_data *ha = vha->hw;
 850        struct req_que *req;
 851
 852        status = QLA_SUCCESS;
 853        if (!sp)
 854                return status;
 855
 856        spin_lock_irqsave(&ha->hardware_lock, flags);
 857        req = vha->req;
 858        for (cnt = 1; status == QLA_SUCCESS &&
 859                cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
 860                sp = req->outstanding_cmds[cnt];
 861                if (!sp)
 862                        continue;
 863                if (sp->ctx)
 864                        continue;
 865                if (vha->vp_idx != sp->fcport->vha->vp_idx)
 866                        continue;
 867                match = 0;
 868                switch (type) {
 869                case WAIT_HOST:
 870                        match = 1;
 871                        break;
 872                case WAIT_TARGET:
 873                        match = sp->cmd->device->id == t;
 874                        break;
 875                case WAIT_LUN:
 876                        match = (sp->cmd->device->id == t &&
 877                                sp->cmd->device->lun == l);
 878                        break;
 879                }
 880                if (!match)
 881                        continue;
 882
 883                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 884                status = qla2x00_eh_wait_on_command(sp->cmd);
 885                spin_lock_irqsave(&ha->hardware_lock, flags);
 886        }
 887        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 888
 889        return status;
 890}
 891
 892static char *reset_errors[] = {
 893        "HBA not online",
 894        "HBA not ready",
 895        "Task management failed",
 896        "Waiting for command completions",
 897};
 898
 899static int
 900__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 901    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
 902{
 903        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 904        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 905        int err;
 906
 907        qla2x00_block_error_handler(cmd);
 908
 909        if (!fcport)
 910                return FAILED;
 911
 912        qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
 913            vha->host_no, cmd->device->id, cmd->device->lun, name);
 914
 915        err = 0;
 916        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
 917                goto eh_reset_failed;
 918        err = 1;
 919        if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
 920                goto eh_reset_failed;
 921        err = 2;
 922        if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
 923                != QLA_SUCCESS)
 924                goto eh_reset_failed;
 925        err = 3;
 926        if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
 927            cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS)
 928                goto eh_reset_failed;
 929
 930        qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
 931            vha->host_no, cmd->device->id, cmd->device->lun, name);
 932
 933        return SUCCESS;
 934
 935 eh_reset_failed:
  936        qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n",
  937            vha->host_no, cmd->device->id, cmd->device->lun, name,
 938            reset_errors[err]);
 939        return FAILED;
 940}
 941
 942static int
 943qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
 944{
 945        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 946        struct qla_hw_data *ha = vha->hw;
 947
 948        return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
 949            ha->isp_ops->lun_reset);
 950}
 951
 952static int
 953qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
 954{
 955        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 956        struct qla_hw_data *ha = vha->hw;
 957
 958        return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
 959            ha->isp_ops->target_reset);
 960}
 961
 962/**************************************************************************
 963* qla2xxx_eh_bus_reset
 964*
 965* Description:
 966*    The bus reset function will reset the bus and abort any executing
 967*    commands.
 968*
 969* Input:
  970*    cmd = Linux SCSI command packet of the command that caused the
 971*          bus reset.
 972*
 973* Returns:
 974*    SUCCESS/FAILURE (defined as macro in scsi.h).
 975*
 976**************************************************************************/
 977static int
 978qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 979{
 980        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 981        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 982        int ret = FAILED;
 983        unsigned int id, lun;
 984        unsigned long serial;
 985        srb_t *sp = (srb_t *) CMD_SP(cmd);
 986
 987        qla2x00_block_error_handler(cmd);
 988
 989        id = cmd->device->id;
 990        lun = cmd->device->lun;
 991        serial = cmd->serial_number;
 992
 993        if (!fcport)
 994                return ret;
 995
 996        qla_printk(KERN_INFO, vha->hw,
 997            "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
 998
 999        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 1000                DEBUG2(printk("%s failed: board disabled\n", __func__));
1001                goto eh_bus_reset_done;
1002        }
1003
1004        if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
1005                if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
1006                        ret = SUCCESS;
1007        }
1008        if (ret == FAILED)
1009                goto eh_bus_reset_done;
1010
1011        /* Flush outstanding commands. */
1012        if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) !=
1013            QLA_SUCCESS)
1014                ret = FAILED;
1015
1016eh_bus_reset_done:
1017        qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
 1018            (ret == FAILED) ? "failed" : "succeeded");
1019
1020        return ret;
1021}
1022
1023/**************************************************************************
1024* qla2xxx_eh_host_reset
1025*
1026* Description:
1027*    The reset function will reset the Adapter.
1028*
1029* Input:
 1030*      cmd = Linux SCSI command packet of the command that caused the
1031*            adapter reset.
1032*
1033* Returns:
1034*      Either SUCCESS or FAILED.
1035*
1036* Note:
1037**************************************************************************/
1038static int
1039qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1040{
1041        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1042        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1043        struct qla_hw_data *ha = vha->hw;
1044        int ret = FAILED;
1045        unsigned int id, lun;
1046        unsigned long serial;
1047        srb_t *sp = (srb_t *) CMD_SP(cmd);
1048        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1049
1050        qla2x00_block_error_handler(cmd);
1051
1052        id = cmd->device->id;
1053        lun = cmd->device->lun;
1054        serial = cmd->serial_number;
1055
1056        if (!fcport)
1057                return ret;
1058
1059        qla_printk(KERN_INFO, ha,
1060            "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
1061
1062        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1063                goto eh_host_reset_lock;
1064
1065        /*
 1066         * FIXME: the dpc thread may be active and processing a
 1067         * loop_resync, so wait a while for it to complete before
 1068         * issuing the big hammer.  Otherwise the big hammer may cause
 1069         * I/O failures, since it marks the devices as lost (kicking
 1070         * off the port_down_timer) while the dpc thread is stuck
 1071         * waiting for the mailbox to complete.
1072         */
1073        qla2x00_wait_for_loop_ready(vha);
1074        if (vha != base_vha) {
1075                if (qla2x00_vp_abort_isp(vha))
1076                        goto eh_host_reset_lock;
1077        } else {
1078                if (ha->wq)
1079                        flush_workqueue(ha->wq);
1080
1081                set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1082                if (qla2x00_abort_isp(base_vha)) {
1083                        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1084                        /* failed. schedule dpc to try */
1085                        set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1086
1087                        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1088                                goto eh_host_reset_lock;
1089                }
1090                clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1091        }
1092
 1093        /* Wait for commands to be returned to the OS. */
1094        if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) ==
1095                QLA_SUCCESS)
1096                ret = SUCCESS;
1097
1098eh_host_reset_lock:
1099        qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
 1100            (ret == FAILED) ? "failed" : "succeeded");
1101
1102        return ret;
1103}
1104
1105/*
1106* qla2x00_loop_reset
1107*      Issue loop reset.
1108*
1109* Input:
1110*      ha = adapter block pointer.
1111*
1112* Returns:
1113*      0 = success
1114*/
1115int
1116qla2x00_loop_reset(scsi_qla_host_t *vha)
1117{
1118        int ret;
1119        struct fc_port *fcport;
1120        struct qla_hw_data *ha = vha->hw;
1121
1122        if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
1123                ret = qla2x00_full_login_lip(vha);
1124                if (ret != QLA_SUCCESS) {
1125                        DEBUG2_3(printk("%s(%ld): failed: "
1126                            "full_login_lip=%d.\n", __func__, vha->host_no,
1127                            ret));
1128                }
1129                atomic_set(&vha->loop_state, LOOP_DOWN);
1130                atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1131                qla2x00_mark_all_devices_lost(vha, 0);
1132                qla2x00_wait_for_loop_ready(vha);
1133        }
1134
1135        if (ha->flags.enable_lip_reset) {
1136                ret = qla2x00_lip_reset(vha);
1137                if (ret != QLA_SUCCESS) {
1138                        DEBUG2_3(printk("%s(%ld): failed: "
1139                            "lip_reset=%d.\n", __func__, vha->host_no, ret));
1140                } else
1141                        qla2x00_wait_for_loop_ready(vha);
1142        }
1143
1144        if (ha->flags.enable_target_reset) {
1145                list_for_each_entry(fcport, &vha->vp_fcports, list) {
1146                        if (fcport->port_type != FCT_TARGET)
1147                                continue;
1148
1149                        ret = ha->isp_ops->target_reset(fcport, 0, 0);
1150                        if (ret != QLA_SUCCESS) {
1151                                DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1152                                    "target_reset=%d d_id=%x.\n", __func__,
1153                                    vha->host_no, ret, fcport->d_id.b24));
1154                        }
1155                }
1156        }
1157        /* Issue marker command only when we are going to start the I/O */
1158        vha->marker_needed = 1;
1159
1160        return QLA_SUCCESS;
1161}
1162
1163void
1164qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1165{
1166        int que, cnt;
1167        unsigned long flags;
1168        srb_t *sp;
1169        struct srb_ctx *ctx;
1170        struct qla_hw_data *ha = vha->hw;
1171        struct req_que *req;
1172
1173        spin_lock_irqsave(&ha->hardware_lock, flags);
1174        for (que = 0; que < ha->max_req_queues; que++) {
1175                req = ha->req_q_map[que];
1176                if (!req)
1177                        continue;
1178                for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1179                        sp = req->outstanding_cmds[cnt];
1180                        if (sp) {
1181                                req->outstanding_cmds[cnt] = NULL;
1182                                if (!sp->ctx) {
1183                                        sp->cmd->result = res;
1184                                        qla2x00_sp_compl(ha, sp);
1185                                } else {
1186                                        ctx = sp->ctx;
1187                                        del_timer_sync(&ctx->timer);
1188                                        ctx->free(sp);
1189                                }
1190                        }
1191                }
1192        }
1193        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1194}
1195
1196static int
1197qla2xxx_slave_alloc(struct scsi_device *sdev)
1198{
1199        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1200
1201        if (!rport || fc_remote_port_chkready(rport))
1202                return -ENXIO;
1203
1204        sdev->hostdata = *(fc_port_t **)rport->dd_data;
1205
1206        return 0;
1207}
1208
1209static int
1210qla2xxx_slave_configure(struct scsi_device *sdev)
1211{
1212        scsi_qla_host_t *vha = shost_priv(sdev->host);
1213        struct qla_hw_data *ha = vha->hw;
1214        struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1215        fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1216        struct req_que *req = vha->req;
1217
1218        if (sdev->tagged_supported)
1219                scsi_activate_tcq(sdev, req->max_q_depth);
1220        else
1221                scsi_deactivate_tcq(sdev, req->max_q_depth);
1222
1223        rport->dev_loss_tmo = ha->port_down_retry_count;
1224        if (sdev->type == TYPE_TAPE)
1225                fcport->flags |= FCF_TAPE_PRESENT;
1226
1227        return 0;
1228}
1229
1230static void
1231qla2xxx_slave_destroy(struct scsi_device *sdev)
1232{
1233        sdev->hostdata = NULL;
1234}
1235
1236static int
1237qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth)
1238{
1239        scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1240        return sdev->queue_depth;
1241}
1242
1243static int
1244qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1245{
1246        if (sdev->tagged_supported) {
1247                scsi_set_tag_type(sdev, tag_type);
1248                if (tag_type)
1249                        scsi_activate_tcq(sdev, sdev->queue_depth);
1250                else
1251                        scsi_deactivate_tcq(sdev, sdev->queue_depth);
1252        } else
1253                tag_type = 0;
1254
1255        return tag_type;
1256}
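
/*
 * Illustrative note: the two queue hooks above are driven by the SCSI
 * midlayer when user space writes a device's sysfs attributes, e.g.
 *
 *	echo 16 > /sys/bus/scsi/devices/<h:c:t:l>/queue_depth
 *
 * (the exact sysfs path is shown only as an example; it varies with kernel
 * version).
 */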
1257
1258/**
1259 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1260 * @ha: HA context
1261 *
 1262 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 1263 * supported addressing method.
1264 */
1265static void
1266qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1267{
1268        /* Assume a 32bit DMA mask. */
1269        ha->flags.enable_64bit_addressing = 0;
1270
1271        if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1272                /* Any upper-dword bits set? */
1273                if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1274                    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
1275                        /* Ok, a 64bit DMA mask is applicable. */
1276                        ha->flags.enable_64bit_addressing = 1;
1277                        ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1278                        ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1279                        return;
1280                }
1281        }
1282
1283        dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1284        pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
1285}
1286
1287static void
1288qla2x00_enable_intrs(struct qla_hw_data *ha)
1289{
1290        unsigned long flags = 0;
1291        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1292
1293        spin_lock_irqsave(&ha->hardware_lock, flags);
1294        ha->interrupts_on = 1;
1295        /* enable risc and host interrupts */
1296        WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1297        RD_REG_WORD(&reg->ictrl);
1298        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1299
1300}
1301
1302static void
1303qla2x00_disable_intrs(struct qla_hw_data *ha)
1304{
1305        unsigned long flags = 0;
1306        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1307
1308        spin_lock_irqsave(&ha->hardware_lock, flags);
1309        ha->interrupts_on = 0;
1310        /* disable risc and host interrupts */
1311        WRT_REG_WORD(&reg->ictrl, 0);
1312        RD_REG_WORD(&reg->ictrl);
1313        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1314}
1315
1316static void
1317qla24xx_enable_intrs(struct qla_hw_data *ha)
1318{
1319        unsigned long flags = 0;
1320        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1321
1322        spin_lock_irqsave(&ha->hardware_lock, flags);
1323        ha->interrupts_on = 1;
1324        WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1325        RD_REG_DWORD(&reg->ictrl);
1326        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1327}
1328
1329static void
1330qla24xx_disable_intrs(struct qla_hw_data *ha)
1331{
1332        unsigned long flags = 0;
1333        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1334
1335        if (IS_NOPOLLING_TYPE(ha))
1336                return;
1337        spin_lock_irqsave(&ha->hardware_lock, flags);
1338        ha->interrupts_on = 0;
1339        WRT_REG_DWORD(&reg->ictrl, 0);
1340        RD_REG_DWORD(&reg->ictrl);
1341        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1342}
1343
1344static struct isp_operations qla2100_isp_ops = {
1345        .pci_config             = qla2100_pci_config,
1346        .reset_chip             = qla2x00_reset_chip,
1347        .chip_diag              = qla2x00_chip_diag,
1348        .config_rings           = qla2x00_config_rings,
1349        .reset_adapter          = qla2x00_reset_adapter,
1350        .nvram_config           = qla2x00_nvram_config,
1351        .update_fw_options      = qla2x00_update_fw_options,
1352        .load_risc              = qla2x00_load_risc,
1353        .pci_info_str           = qla2x00_pci_info_str,
1354        .fw_version_str         = qla2x00_fw_version_str,
1355        .intr_handler           = qla2100_intr_handler,
1356        .enable_intrs           = qla2x00_enable_intrs,
1357        .disable_intrs          = qla2x00_disable_intrs,
1358        .abort_command          = qla2x00_abort_command,
1359        .target_reset           = qla2x00_abort_target,
1360        .lun_reset              = qla2x00_lun_reset,
1361        .fabric_login           = qla2x00_login_fabric,
1362        .fabric_logout          = qla2x00_fabric_logout,
1363        .calc_req_entries       = qla2x00_calc_iocbs_32,
1364        .build_iocbs            = qla2x00_build_scsi_iocbs_32,
1365        .prep_ms_iocb           = qla2x00_prep_ms_iocb,
1366        .prep_ms_fdmi_iocb      = qla2x00_prep_ms_fdmi_iocb,
1367        .read_nvram             = qla2x00_read_nvram_data,
1368        .write_nvram            = qla2x00_write_nvram_data,
1369        .fw_dump                = qla2100_fw_dump,
1370        .beacon_on              = NULL,
1371        .beacon_off             = NULL,
1372        .beacon_blink           = NULL,
1373        .read_optrom            = qla2x00_read_optrom_data,
1374        .write_optrom           = qla2x00_write_optrom_data,
1375        .get_flash_version      = qla2x00_get_flash_version,
1376        .start_scsi             = qla2x00_start_scsi,
1377};
1378
1379static struct isp_operations qla2300_isp_ops = {
1380        .pci_config             = qla2300_pci_config,
1381        .reset_chip             = qla2x00_reset_chip,
1382        .chip_diag              = qla2x00_chip_diag,
1383        .config_rings           = qla2x00_config_rings,
1384        .reset_adapter          = qla2x00_reset_adapter,
1385        .nvram_config           = qla2x00_nvram_config,
1386        .update_fw_options      = qla2x00_update_fw_options,
1387        .load_risc              = qla2x00_load_risc,
1388        .pci_info_str           = qla2x00_pci_info_str,
1389        .fw_version_str         = qla2x00_fw_version_str,
1390        .intr_handler           = qla2300_intr_handler,
1391        .enable_intrs           = qla2x00_enable_intrs,
1392        .disable_intrs          = qla2x00_disable_intrs,
1393        .abort_command          = qla2x00_abort_command,
1394        .target_reset           = qla2x00_abort_target,
1395        .lun_reset              = qla2x00_lun_reset,
1396        .fabric_login           = qla2x00_login_fabric,
1397        .fabric_logout          = qla2x00_fabric_logout,
1398        .calc_req_entries       = qla2x00_calc_iocbs_32,
1399        .build_iocbs            = qla2x00_build_scsi_iocbs_32,
1400        .prep_ms_iocb           = qla2x00_prep_ms_iocb,
1401        .prep_ms_fdmi_iocb      = qla2x00_prep_ms_fdmi_iocb,
1402        .read_nvram             = qla2x00_read_nvram_data,
1403        .write_nvram            = qla2x00_write_nvram_data,
1404        .fw_dump                = qla2300_fw_dump,
1405        .beacon_on              = qla2x00_beacon_on,
1406        .beacon_off             = qla2x00_beacon_off,
1407        .beacon_blink           = qla2x00_beacon_blink,
1408        .read_optrom            = qla2x00_read_optrom_data,
1409        .write_optrom           = qla2x00_write_optrom_data,
1410        .get_flash_version      = qla2x00_get_flash_version,
1411        .start_scsi             = qla2x00_start_scsi,
1412};
1413
1414static struct isp_operations qla24xx_isp_ops = {
1415        .pci_config             = qla24xx_pci_config,
1416        .reset_chip             = qla24xx_reset_chip,
1417        .chip_diag              = qla24xx_chip_diag,
1418        .config_rings           = qla24xx_config_rings,
1419        .reset_adapter          = qla24xx_reset_adapter,
1420        .nvram_config           = qla24xx_nvram_config,
1421        .update_fw_options      = qla24xx_update_fw_options,
1422        .load_risc              = qla24xx_load_risc,
1423        .pci_info_str           = qla24xx_pci_info_str,
1424        .fw_version_str         = qla24xx_fw_version_str,
1425        .intr_handler           = qla24xx_intr_handler,
1426        .enable_intrs           = qla24xx_enable_intrs,
1427        .disable_intrs          = qla24xx_disable_intrs,
1428        .abort_command          = qla24xx_abort_command,
1429        .target_reset           = qla24xx_abort_target,
1430        .lun_reset              = qla24xx_lun_reset,
1431        .fabric_login           = qla24xx_login_fabric,
1432        .fabric_logout          = qla24xx_fabric_logout,
1433        .calc_req_entries       = NULL,
1434        .build_iocbs            = NULL,
1435        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1436        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1437        .read_nvram             = qla24xx_read_nvram_data,
1438        .write_nvram            = qla24xx_write_nvram_data,
1439        .fw_dump                = qla24xx_fw_dump,
1440        .beacon_on              = qla24xx_beacon_on,
1441        .beacon_off             = qla24xx_beacon_off,
1442        .beacon_blink           = qla24xx_beacon_blink,
1443        .read_optrom            = qla24xx_read_optrom_data,
1444        .write_optrom           = qla24xx_write_optrom_data,
1445        .get_flash_version      = qla24xx_get_flash_version,
1446        .start_scsi             = qla24xx_start_scsi,
1447};
1448
1449static struct isp_operations qla25xx_isp_ops = {
1450        .pci_config             = qla25xx_pci_config,
1451        .reset_chip             = qla24xx_reset_chip,
1452        .chip_diag              = qla24xx_chip_diag,
1453        .config_rings           = qla24xx_config_rings,
1454        .reset_adapter          = qla24xx_reset_adapter,
1455        .nvram_config           = qla24xx_nvram_config,
1456        .update_fw_options      = qla24xx_update_fw_options,
1457        .load_risc              = qla24xx_load_risc,
1458        .pci_info_str           = qla24xx_pci_info_str,
1459        .fw_version_str         = qla24xx_fw_version_str,
1460        .intr_handler           = qla24xx_intr_handler,
1461        .enable_intrs           = qla24xx_enable_intrs,
1462        .disable_intrs          = qla24xx_disable_intrs,
1463        .abort_command          = qla24xx_abort_command,
1464        .target_reset           = qla24xx_abort_target,
1465        .lun_reset              = qla24xx_lun_reset,
1466        .fabric_login           = qla24xx_login_fabric,
1467        .fabric_logout          = qla24xx_fabric_logout,
1468        .calc_req_entries       = NULL,
1469        .build_iocbs            = NULL,
1470        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1471        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1472        .read_nvram             = qla25xx_read_nvram_data,
1473        .write_nvram            = qla25xx_write_nvram_data,
1474        .fw_dump                = qla25xx_fw_dump,
1475        .beacon_on              = qla24xx_beacon_on,
1476        .beacon_off             = qla24xx_beacon_off,
1477        .beacon_blink           = qla24xx_beacon_blink,
1478        .read_optrom            = qla25xx_read_optrom_data,
1479        .write_optrom           = qla24xx_write_optrom_data,
1480        .get_flash_version      = qla24xx_get_flash_version,
1481        .start_scsi             = qla24xx_start_scsi,
1482};
1483
1484static struct isp_operations qla81xx_isp_ops = {
1485        .pci_config             = qla25xx_pci_config,
1486        .reset_chip             = qla24xx_reset_chip,
1487        .chip_diag              = qla24xx_chip_diag,
1488        .config_rings           = qla24xx_config_rings,
1489        .reset_adapter          = qla24xx_reset_adapter,
1490        .nvram_config           = qla81xx_nvram_config,
1491        .update_fw_options      = qla81xx_update_fw_options,
1492        .load_risc              = qla81xx_load_risc,
1493        .pci_info_str           = qla24xx_pci_info_str,
1494        .fw_version_str         = qla24xx_fw_version_str,
1495        .intr_handler           = qla24xx_intr_handler,
1496        .enable_intrs           = qla24xx_enable_intrs,
1497        .disable_intrs          = qla24xx_disable_intrs,
1498        .abort_command          = qla24xx_abort_command,
1499        .target_reset           = qla24xx_abort_target,
1500        .lun_reset              = qla24xx_lun_reset,
1501        .fabric_login           = qla24xx_login_fabric,
1502        .fabric_logout          = qla24xx_fabric_logout,
1503        .calc_req_entries       = NULL,
1504        .build_iocbs            = NULL,
1505        .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1506        .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1507        .read_nvram             = NULL,
1508        .write_nvram            = NULL,
1509        .fw_dump                = qla81xx_fw_dump,
1510        .beacon_on              = qla24xx_beacon_on,
1511        .beacon_off             = qla24xx_beacon_off,
1512        .beacon_blink           = qla24xx_beacon_blink,
1513        .read_optrom            = qla25xx_read_optrom_data,
1514        .write_optrom           = qla24xx_write_optrom_data,
1515        .get_flash_version      = qla24xx_get_flash_version,
1516        .start_scsi             = qla24xx_start_scsi,
1517};
1518
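/*
 * Set the DT_* device-type bits and the firmware load address for the
 * detected ISP based on the PCI device ID.  The bits set here drive the
 * IS_QLAxxxx()-style checks used later in the probe path.
 */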
1519static inline void
1520qla2x00_set_isp_flags(struct qla_hw_data *ha)
1521{
1522        ha->device_type = DT_EXTENDED_IDS;
1523        switch (ha->pdev->device) {
1524        case PCI_DEVICE_ID_QLOGIC_ISP2100:
1525                ha->device_type |= DT_ISP2100;
1526                ha->device_type &= ~DT_EXTENDED_IDS;
1527                ha->fw_srisc_address = RISC_START_ADDRESS_2100;
1528                break;
1529        case PCI_DEVICE_ID_QLOGIC_ISP2200:
1530                ha->device_type |= DT_ISP2200;
1531                ha->device_type &= ~DT_EXTENDED_IDS;
1532                ha->fw_srisc_address = RISC_START_ADDRESS_2100;
1533                break;
1534        case PCI_DEVICE_ID_QLOGIC_ISP2300:
1535                ha->device_type |= DT_ISP2300;
1536                ha->device_type |= DT_ZIO_SUPPORTED;
1537                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1538                break;
1539        case PCI_DEVICE_ID_QLOGIC_ISP2312:
1540                ha->device_type |= DT_ISP2312;
1541                ha->device_type |= DT_ZIO_SUPPORTED;
1542                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1543                break;
1544        case PCI_DEVICE_ID_QLOGIC_ISP2322:
1545                ha->device_type |= DT_ISP2322;
1546                ha->device_type |= DT_ZIO_SUPPORTED;
1547                if (ha->pdev->subsystem_vendor == 0x1028 &&
1548                    ha->pdev->subsystem_device == 0x0170)
1549                        ha->device_type |= DT_OEM_001;
1550                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1551                break;
1552        case PCI_DEVICE_ID_QLOGIC_ISP6312:
1553                ha->device_type |= DT_ISP6312;
1554                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1555                break;
1556        case PCI_DEVICE_ID_QLOGIC_ISP6322:
1557                ha->device_type |= DT_ISP6322;
1558                ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1559                break;
1560        case PCI_DEVICE_ID_QLOGIC_ISP2422:
1561                ha->device_type |= DT_ISP2422;
1562                ha->device_type |= DT_ZIO_SUPPORTED;
1563                ha->device_type |= DT_FWI2;
1564                ha->device_type |= DT_IIDMA;
1565                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1566                break;
1567        case PCI_DEVICE_ID_QLOGIC_ISP2432:
1568                ha->device_type |= DT_ISP2432;
1569                ha->device_type |= DT_ZIO_SUPPORTED;
1570                ha->device_type |= DT_FWI2;
1571                ha->device_type |= DT_IIDMA;
1572                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1573                break;
1574        case PCI_DEVICE_ID_QLOGIC_ISP8432:
1575                ha->device_type |= DT_ISP8432;
1576                ha->device_type |= DT_ZIO_SUPPORTED;
1577                ha->device_type |= DT_FWI2;
1578                ha->device_type |= DT_IIDMA;
1579                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1580                break;
1581        case PCI_DEVICE_ID_QLOGIC_ISP5422:
1582                ha->device_type |= DT_ISP5422;
1583                ha->device_type |= DT_FWI2;
1584                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1585                break;
1586        case PCI_DEVICE_ID_QLOGIC_ISP5432:
1587                ha->device_type |= DT_ISP5432;
1588                ha->device_type |= DT_FWI2;
1589                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1590                break;
1591        case PCI_DEVICE_ID_QLOGIC_ISP2532:
1592                ha->device_type |= DT_ISP2532;
1593                ha->device_type |= DT_ZIO_SUPPORTED;
1594                ha->device_type |= DT_FWI2;
1595                ha->device_type |= DT_IIDMA;
1596                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1597                break;
1598        case PCI_DEVICE_ID_QLOGIC_ISP8001:
1599                ha->device_type |= DT_ISP8001;
1600                ha->device_type |= DT_ZIO_SUPPORTED;
1601                ha->device_type |= DT_FWI2;
1602                ha->device_type |= DT_IIDMA;
1603                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1604                break;
1605        }
1606
1607        /* Get adapter physical port no from interrupt pin register. */
1608        pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1609        if (ha->port_no & 1)
1610                ha->flags.port0 = 1;
1611        else
1612                ha->flags.port0 = 0;
1613}
1614
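/*
 * Reserve and map the PCI regions used by the driver: BAR 0 is the optional
 * PIO window (only needed for flash operations on ISP2312 v2 chips), BAR 1
 * is the MMIO register window, and BAR 3 is the multiqueue register window
 * used on ISP25xx/ISP81xx parts when more than one request/response queue
 * is configured.
 */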
1615static int
1616qla2x00_iospace_config(struct qla_hw_data *ha)
1617{
1618        resource_size_t pio;
1619        uint16_t msix;
1620        int cpus;
1621
1622        if (pci_request_selected_regions(ha->pdev, ha->bars,
1623            QLA2XXX_DRIVER_NAME)) {
1624                qla_printk(KERN_WARNING, ha,
1625                    "Failed to reserve PIO/MMIO regions (%s)\n",
1626                    pci_name(ha->pdev));
1627
1628                goto iospace_error_exit;
1629        }
1630        if (!(ha->bars & 1))
1631                goto skip_pio;
1632
1633        /* We only need PIO for Flash operations on ISP2312 v2 chips. */
1634        pio = pci_resource_start(ha->pdev, 0);
1635        if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1636                if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1637                        qla_printk(KERN_WARNING, ha,
1638                            "Invalid PCI I/O region size (%s)...\n",
1639                                pci_name(ha->pdev));
1640                        pio = 0;
1641                }
1642        } else {
1643                qla_printk(KERN_WARNING, ha,
1644                    "region #0 not a PIO resource (%s)...\n",
1645                    pci_name(ha->pdev));
1646                pio = 0;
1647        }
1648        ha->pio_address = pio;
1649
1650skip_pio:
1651        /* Use MMIO operations for all accesses. */
1652        if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1653                qla_printk(KERN_ERR, ha,
1654                    "region #1 not an MMIO resource (%s), aborting\n",
1655                    pci_name(ha->pdev));
1656                goto iospace_error_exit;
1657        }
1658        if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1659                qla_printk(KERN_ERR, ha,
1660                    "Invalid PCI mem region size (%s), aborting\n",
1661                        pci_name(ha->pdev));
1662                goto iospace_error_exit;
1663        }
1664
1665        ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1666        if (!ha->iobase) {
1667                qla_printk(KERN_ERR, ha,
1668                    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
1669
1670                goto iospace_error_exit;
1671        }
1672
1673        /* Determine queue resources */
1674        ha->max_req_queues = ha->max_rsp_queues = 1;
1675        if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) &&
1676                (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1677                goto mqiobase_exit;
1678        ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1679                        pci_resource_len(ha->pdev, 3));
1680        if (ha->mqiobase) {
1681                /* Read MSIX vector size of the board */
1682                pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1683                ha->msix_count = msix;
1684                /* Max queues are bounded by available msix vectors */
1685                /* queue 0 uses two msix vectors */
1686                if (ql2xmultique_tag) {
1687                        cpus = num_online_cpus();
1688                        ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
1689                                (cpus + 1) : (ha->msix_count - 1);
1690                        ha->max_req_queues = 2;
1691                } else if (ql2xmaxqueues > 1) {
1692                        ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1693                                                QLA_MQ_SIZE : ql2xmaxqueues;
1694                        DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1695                        " of request queues:%d\n", ha->max_req_queues));
1696                }
1697                qla_printk(KERN_INFO, ha,
1698                        "MSI-X vector count: %d\n", msix);
1699        } else
1700                qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1701
1702mqiobase_exit:
1703        ha->msix_count = ha->max_rsp_queues + 1;
1704        return (0);
1705
1706iospace_error_exit:
1707        return (-ENOMEM);
1708}
1709
1710static void
1711qla2xxx_scan_start(struct Scsi_Host *shost)
1712{
1713        scsi_qla_host_t *vha = shost_priv(shost);
1714
1715        if (vha->hw->flags.running_gold_fw)
1716                return;
1717
1718        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1719        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1720        set_bit(RSCN_UPDATE, &vha->dpc_flags);
1721        set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1722}
1723
1724static int
1725qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1726{
1727        scsi_qla_host_t *vha = shost_priv(shost);
1728
1729        if (!vha->host)
1730                return 1;
1731        if (time > vha->hw->loop_reset_delay * HZ)
1732                return 1;
1733
1734        return atomic_read(&vha->loop_state) == LOOP_READY;
1735}
1736
1737/*
1738 * PCI driver interface
1739 */
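/*
 * qla2x00_probe_one() is the PCI probe entry point.  In outline it enables
 * the PCI device (memory-only for ISP24xx-and-later parts), maps the I/O
 * space, selects the per-ISP isp_operations table and ring sizes, allocates
 * the request/response rings and the Scsi_Host, requests IRQs, initializes
 * the adapter, starts the per-adapter DPC thread and timer, and finally
 * registers the host with the SCSI midlayer and scans it.
 */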
1740static int __devinit
1741qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1742{
1743        int     ret = -ENODEV;
1744        struct Scsi_Host *host;
1745        scsi_qla_host_t *base_vha = NULL;
1746        struct qla_hw_data *ha;
1747        char pci_info[30];
1748        char fw_str[30];
1749        struct scsi_host_template *sht;
1750        int bars, max_id, mem_only = 0;
1751        uint16_t req_length = 0, rsp_length = 0;
1752        struct req_que *req = NULL;
1753        struct rsp_que *rsp = NULL;
1754
1755        bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1756        sht = &qla2xxx_driver_template;
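        /*
         * For ISP24xx and later parts only the memory BARs are selected
         * (mem_only); the device is then enabled with pci_enable_device_mem()
         * below.
         */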
1757        if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
1758            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
1759            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
1760            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
1761            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
1762            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
1763            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
1764                bars = pci_select_bars(pdev, IORESOURCE_MEM);
1765                mem_only = 1;
1766        }
1767
1768        if (mem_only) {
1769                if (pci_enable_device_mem(pdev))
1770                        goto probe_out;
1771        } else {
1772                if (pci_enable_device(pdev))
1773                        goto probe_out;
1774        }
1775
1776        /* This may fail but that's ok */
1777        pci_enable_pcie_error_reporting(pdev);
1778
1779        ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1780        if (!ha) {
1781                DEBUG(printk("Unable to allocate memory for ha\n"));
1782                goto probe_out;
1783        }
1784        ha->pdev = pdev;
1785
1786        /* Record PCI BAR selection and initialize locks */
1787        ha->bars = bars;
1788        ha->mem_only = mem_only;
1789        spin_lock_init(&ha->hardware_lock);
1790
1791        /* Set ISP-type information. */
1792        qla2x00_set_isp_flags(ha);
1793        /* Configure PCI I/O space */
1794        ret = qla2x00_iospace_config(ha);
1795        if (ret)
1796                goto probe_hw_failed;
1797
1798        qla_printk(KERN_INFO, ha,
1799            "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
1800            ha->iobase);
1801
1802        ha->prev_topology = 0;
1803        ha->init_cb_size = sizeof(init_cb_t);
1804        ha->link_data_rate = PORT_SPEED_UNKNOWN;
1805        ha->optrom_size = OPTROM_SIZE_2300;
1806
1807        /* Assign ISP specific operations. */
1808        max_id = MAX_TARGETS_2200;
1809        if (IS_QLA2100(ha)) {
1810                max_id = MAX_TARGETS_2100;
1811                ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
1812                req_length = REQUEST_ENTRY_CNT_2100;
1813                rsp_length = RESPONSE_ENTRY_CNT_2100;
1814                ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1815                ha->gid_list_info_size = 4;
1816                ha->flash_conf_off = ~0;
1817                ha->flash_data_off = ~0;
1818                ha->nvram_conf_off = ~0;
1819                ha->nvram_data_off = ~0;
1820                ha->isp_ops = &qla2100_isp_ops;
1821        } else if (IS_QLA2200(ha)) {
1822                ha->mbx_count = MAILBOX_REGISTER_COUNT;
1823                req_length = REQUEST_ENTRY_CNT_2200;
1824                rsp_length = RESPONSE_ENTRY_CNT_2100;
1825                ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1826                ha->gid_list_info_size = 4;
1827                ha->flash_conf_off = ~0;
1828                ha->flash_data_off = ~0;
1829                ha->nvram_conf_off = ~0;
1830                ha->nvram_data_off = ~0;
1831                ha->isp_ops = &qla2100_isp_ops;
1832        } else if (IS_QLA23XX(ha)) {
1833                ha->mbx_count = MAILBOX_REGISTER_COUNT;
1834                req_length = REQUEST_ENTRY_CNT_2200;
1835                rsp_length = RESPONSE_ENTRY_CNT_2300;
1836                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1837                ha->gid_list_info_size = 6;
1838                if (IS_QLA2322(ha) || IS_QLA6322(ha))
1839                        ha->optrom_size = OPTROM_SIZE_2322;
1840                ha->flash_conf_off = ~0;
1841                ha->flash_data_off = ~0;
1842                ha->nvram_conf_off = ~0;
1843                ha->nvram_data_off = ~0;
1844                ha->isp_ops = &qla2300_isp_ops;
1845        } else if (IS_QLA24XX_TYPE(ha)) {
1846                ha->mbx_count = MAILBOX_REGISTER_COUNT;
1847                req_length = REQUEST_ENTRY_CNT_24XX;
1848                rsp_length = RESPONSE_ENTRY_CNT_2300;
1849                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1850                ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1851                ha->gid_list_info_size = 8;
1852                ha->optrom_size = OPTROM_SIZE_24XX;
1853                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
1854                ha->isp_ops = &qla24xx_isp_ops;
1855                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
1856                ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
1857                ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
1858                ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1859        } else if (IS_QLA25XX(ha)) {
1860                ha->mbx_count = MAILBOX_REGISTER_COUNT;
1861                req_length = REQUEST_ENTRY_CNT_24XX;
1862                rsp_length = RESPONSE_ENTRY_CNT_2300;
1863                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1864                ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1865                ha->gid_list_info_size = 8;
1866                ha->optrom_size = OPTROM_SIZE_25XX;
1867                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1868                ha->isp_ops = &qla25xx_isp_ops;
1869                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
1870                ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
1871                ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
1872                ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1873        } else if (IS_QLA81XX(ha)) {
1874                ha->mbx_count = MAILBOX_REGISTER_COUNT;
1875                req_length = REQUEST_ENTRY_CNT_24XX;
1876                rsp_length = RESPONSE_ENTRY_CNT_2300;
1877                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1878                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
1879                ha->gid_list_info_size = 8;
1880                ha->optrom_size = OPTROM_SIZE_81XX;
1881                ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1882                ha->isp_ops = &qla81xx_isp_ops;
1883                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
1884                ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
1885                ha->nvram_conf_off = ~0;
1886                ha->nvram_data_off = ~0;
1887        }
1888
1889        mutex_init(&ha->vport_lock);
1890        init_completion(&ha->mbx_cmd_comp);
1891        complete(&ha->mbx_cmd_comp);
1892        init_completion(&ha->mbx_intr_comp);
1893
1894        set_bit(0, (unsigned long *) ha->vp_idx_map);
1895
1896        qla2x00_config_dma_addressing(ha);
1897        ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
1898        if (ret < 0) {
1899                qla_printk(KERN_WARNING, ha,
1900                    "[ERROR] Failed to allocate memory for adapter\n");
1901
1902                goto probe_hw_failed;
1903        }
1904
1905        req->max_q_depth = MAX_Q_DEPTH;
1906        if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1907                req->max_q_depth = ql2xmaxqdepth;
1908
1909
1910        base_vha = qla2x00_create_host(sht, ha);
1911        if (!base_vha) {
1912                qla_printk(KERN_WARNING, ha,
1913                    "[ERROR] Failed to allocate memory for scsi_host\n");
1914
1915                ret = -ENOMEM;
1916                qla2x00_mem_free(ha);
1917                qla2x00_free_req_que(ha, req);
1918                qla2x00_free_rsp_que(ha, rsp);
1919                goto probe_hw_failed;
1920        }
1921
1922        pci_set_drvdata(pdev, base_vha);
1923
1924        host = base_vha->host;
1925        base_vha->req = req;
1926        host->can_queue = req->length + 128;
1927        if (IS_QLA2XXX_MIDTYPE(ha))
1928                base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
1929        else
1930                base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
1931                                                base_vha->vp_idx;
1932        if (IS_QLA2100(ha))
1933                host->sg_tablesize = 32;
1934        host->max_id = max_id;
1935        host->this_id = 255;
1936        host->cmd_per_lun = 3;
1937        host->unique_id = host->host_no;
1938        host->max_cmd_len = MAX_CMDSZ;
1939        host->max_channel = MAX_BUSES - 1;
1940        host->max_lun = MAX_LUNS;
1941        host->transportt = qla2xxx_transport_template;
1942
1943        /* Set up the irqs */
1944        ret = qla2x00_request_irqs(ha, rsp);
1945        if (ret)
1946                goto probe_init_failed;
1947        /* Alloc arrays of request and response ring ptrs */
1948que_init:
1949        if (!qla2x00_alloc_queues(ha)) {
1950                qla_printk(KERN_WARNING, ha,
1951                "[ERROR] Failed to allocate memory for queue"
1952                " pointers\n");
1953                goto probe_init_failed;
1954        }
1955        ha->rsp_q_map[0] = rsp;
1956        ha->req_q_map[0] = req;
1957        rsp->req = req;
1958        req->rsp = rsp;
1959        set_bit(0, ha->req_qid_map);
1960        set_bit(0, ha->rsp_qid_map);
1961        /* FWI2-capable only. */
1962        req->req_q_in = &ha->iobase->isp24.req_q_in;
1963        req->req_q_out = &ha->iobase->isp24.req_q_out;
1964        rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
1965        rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
1966        if (ha->mqenable) {
1967                req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
1968                req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
1969                rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
1970                rsp->rsp_q_out =  &ha->mqiobase->isp25mq.rsp_q_out;
1971        }
1972
1973        if (qla2x00_initialize_adapter(base_vha)) {
1974                qla_printk(KERN_WARNING, ha,
1975                    "Failed to initialize adapter\n");
1976
1977                DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
1978                    "Adapter flags %x.\n",
1979                    base_vha->host_no, base_vha->device_flags));
1980
1981                ret = -ENODEV;
1982                goto probe_failed;
1983        }
1984
1985        if (ha->mqenable) {
1986                if (qla25xx_setup_mode(base_vha)) {
1987                        qla_printk(KERN_WARNING, ha,
1988                                "Can't create queues, falling back to single"
1989                                " queue mode\n");
1990                        goto que_init;
1991                }
1992        }
1993
1994        if (ha->flags.running_gold_fw)
1995                goto skip_dpc;
1996
1997        /*
1998         * Startup the kernel thread for this host adapter
1999         */
2000        ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
2001                        "%s_dpc", base_vha->host_str);
2002        if (IS_ERR(ha->dpc_thread)) {
2003                qla_printk(KERN_WARNING, ha,
2004                    "Unable to start DPC thread!\n");
2005                ret = PTR_ERR(ha->dpc_thread);
2006                goto probe_failed;
2007        }
2008
2009skip_dpc:
2010        list_add_tail(&base_vha->list, &ha->vp_list);
2011        base_vha->host->irq = ha->pdev->irq;
2012
2013        /* Initialize the timer */
2014        qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
2015
2016        DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2017            base_vha->host_no, ha));
2018
2019        base_vha->flags.init_done = 1;
2020        base_vha->flags.online = 1;
2021
2022        ret = scsi_add_host(host, &pdev->dev);
2023        if (ret)
2024                goto probe_failed;
2025
2026        ha->isp_ops->enable_intrs(ha);
2027
2028        scsi_scan_host(host);
2029
2030        qla2x00_alloc_sysfs_attr(base_vha);
2031
2032        qla2x00_init_host_attr(base_vha);
2033
2034        qla2x00_dfs_setup(base_vha);
2035
2036        qla_printk(KERN_INFO, ha, "\n"
2037            " QLogic Fibre Channel HBA Driver: %s\n"
2038            "  QLogic %s - %s\n"
2039            "  ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
2040            qla2x00_version_str, ha->model_number,
2041            ha->model_desc ? ha->model_desc : "", pdev->device,
2042            ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
2043            ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
2044            ha->isp_ops->fw_version_str(base_vha, fw_str));
2045
2046        return 0;
2047
2048probe_init_failed:
2049        qla2x00_free_req_que(ha, req);
2050        qla2x00_free_rsp_que(ha, rsp);
2051        ha->max_req_queues = ha->max_rsp_queues = 0;
2052
2053probe_failed:
2054        if (base_vha->timer_active)
2055                qla2x00_stop_timer(base_vha);
2056        base_vha->flags.online = 0;
2057        if (ha->dpc_thread) {
2058                struct task_struct *t = ha->dpc_thread;
2059
2060                ha->dpc_thread = NULL;
2061                kthread_stop(t);
2062        }
2063
2064        qla2x00_free_device(base_vha);
2065
2066        scsi_host_put(base_vha->host);
2067
2068probe_hw_failed:
2069        if (ha->iobase)
2070                iounmap(ha->iobase);
2071
2072        pci_release_selected_regions(ha->pdev, ha->bars);
2073        kfree(ha);
2074        ha = NULL;
2075
2076probe_out:
2077        pci_disable_device(pdev);
2078        return ret;
2079}
2080
2081static void
2082qla2x00_remove_one(struct pci_dev *pdev)
2083{
2084        scsi_qla_host_t *base_vha, *vha, *temp;
2085        struct qla_hw_data  *ha;
2086
2087        base_vha = pci_get_drvdata(pdev);
2088        ha = base_vha->hw;
2089
2090        list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
2091                if (vha && vha->fc_vport)
2092                        fc_vport_terminate(vha->fc_vport);
2093        }
2094
2095        set_bit(UNLOADING, &base_vha->dpc_flags);
2096
2097        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
2098
2099        qla2x00_dfs_remove(base_vha);
2100
2101        qla84xx_put_chip(base_vha);
2102
2103        /* Disable timer */
2104        if (base_vha->timer_active)
2105                qla2x00_stop_timer(base_vha);
2106
2107        base_vha->flags.online = 0;
2108
2109        /* Flush the work queue and remove it */
2110        if (ha->wq) {
2111                flush_workqueue(ha->wq);
2112                destroy_workqueue(ha->wq);
2113                ha->wq = NULL;
2114        }
2115
2116        /* Kill the kernel thread for this host */
2117        if (ha->dpc_thread) {
2118                struct task_struct *t = ha->dpc_thread;
2119
2120                /*
2121                 * qla2xxx_wake_dpc checks for ->dpc_thread
2122                 * so we need to zero it out.
2123                 */
2124                ha->dpc_thread = NULL;
2125                kthread_stop(t);
2126        }
2127
2128        qla2x00_free_sysfs_attr(base_vha);
2129
2130        fc_remove_host(base_vha->host);
2131
2132        scsi_remove_host(base_vha->host);
2133
2134        qla2x00_free_device(base_vha);
2135
2136        scsi_host_put(base_vha->host);
2137
2138        if (ha->iobase)
2139                iounmap(ha->iobase);
2140
2141        if (ha->mqiobase)
2142                iounmap(ha->mqiobase);
2143
2144        pci_release_selected_regions(ha->pdev, ha->bars);
2145        kfree(ha);
2146        ha = NULL;
2147
2148        pci_disable_device(pdev);
2149        pci_set_drvdata(pdev, NULL);
2150}
2151
2152static void
2153qla2x00_free_device(scsi_qla_host_t *vha)
2154{
2155        struct qla_hw_data *ha = vha->hw;
2156
2157        qla25xx_delete_queues(vha);
2158
2159        if (ha->flags.fce_enabled)
2160                qla2x00_disable_fce_trace(vha, NULL, NULL);
2161
2162        if (ha->eft)
2163                qla2x00_disable_eft_trace(vha);
2164
2165        /* Stop currently executing firmware. */
2166        qla2x00_try_to_stop_firmware(vha);
2167
2168        /* turn-off interrupts on the card */
2169        if (ha->interrupts_on)
2170                ha->isp_ops->disable_intrs(ha);
2171
2172        qla2x00_free_irqs(vha);
2173
2174        qla2x00_mem_free(ha);
2175
2176        qla2x00_free_queues(ha);
2177}
2178
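/*
 * Remove the fc_rport associated with an fcport.  With defer set, the rport
 * is stashed in fcport->drport and FCPORT_UPDATE_NEEDED is raised so the DPC
 * thread can finish the removal; otherwise fc_remote_port_delete() is called
 * directly.
 */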
2179static inline void
2180qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2181    int defer)
2182{
2183        struct fc_rport *rport;
2184        scsi_qla_host_t *base_vha;
2185
2186        if (!fcport->rport)
2187                return;
2188
2189        rport = fcport->rport;
2190        if (defer) {
2191                base_vha = pci_get_drvdata(vha->hw->pdev);
2192                spin_lock_irq(vha->host->host_lock);
2193                fcport->drport = rport;
2194                spin_unlock_irq(vha->host->host_lock);
2195                set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2196                qla2xxx_wake_dpc(base_vha);
2197        } else
2198                fc_remote_port_delete(rport);
2199}
2200
2201/*
2202 * qla2x00_mark_device_lost
2203 *      Updates fcport state when a device goes offline.
2204 *
2205 * Input: vha = adapter block pointer.  fcport = port structure pointer.
2206 *        do_login = retry the login when non-zero.
2207 *        defer = defer rport removal to the DPC thread.
2208 * Return: None.
2209 */
2210void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2211    int do_login, int defer)
2212{
2213        if (atomic_read(&fcport->state) == FCS_ONLINE &&
2214            vha->vp_idx == fcport->vp_idx) {
2215                atomic_set(&fcport->state, FCS_DEVICE_LOST);
2216                qla2x00_schedule_rport_del(vha, fcport, defer);
2217        }
2218        /*
2219         * We may need to retry the login, so don't change the state of the
2220         * port but do the retries.
2221         */
2222        if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
2223                atomic_set(&fcport->state, FCS_DEVICE_LOST);
2224
2225        if (!do_login)
2226                return;
2227
2228        if (fcport->login_retry == 0) {
2229                fcport->login_retry = vha->hw->login_retry_count;
2230                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2231
2232                DEBUG(printk("scsi(%ld): Port login retry: "
2233                    "%02x%02x%02x%02x%02x%02x%02x%02x, "
2234                    "id = 0x%04x retry cnt=%d\n",
2235                    vha->host_no,
2236                    fcport->port_name[0],
2237                    fcport->port_name[1],
2238                    fcport->port_name[2],
2239                    fcport->port_name[3],
2240                    fcport->port_name[4],
2241                    fcport->port_name[5],
2242                    fcport->port_name[6],
2243                    fcport->port_name[7],
2244                    fcport->loop_id,
2245                    fcport->login_retry));
2246        }
2247}
2248
2249/*
2250 * qla2x00_mark_all_devices_lost
2251 *      Marks all fcports on a vport as lost when their devices go offline.
2252 *
2253 * Input:
2254 *      vha = adapter block pointer.
2255 *      defer = defer rport removal to the DPC thread.
2256 *
2257 * Return:
2258 *      None.
2259 *
2260 * Context:
2261 */
2262void
2263qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2264{
2265        fc_port_t *fcport;
2266
2267        list_for_each_entry(fcport, &vha->vp_fcports, list) {
2268                if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
2269                        continue;
2270
2271                /*
2272                 * No point in marking the device as lost, if the device is
2273                 * already DEAD.
2274                 */
2275                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2276                        continue;
2277                if (atomic_read(&fcport->state) == FCS_ONLINE) {
2278                        if (defer)
2279                                qla2x00_schedule_rport_del(vha, fcport, defer);
2280                        else if (vha->vp_idx == fcport->vp_idx)
2281                                qla2x00_schedule_rport_del(vha, fcport, defer);
2282                }
2283                atomic_set(&fcport->state, FCS_DEVICE_LOST);
2284        }
2285}
2286
2287/*
2288* qla2x00_mem_alloc
2289*      Allocates adapter memory.
2290*
2291* Returns:
2292*      1  = success.
2293*      -ENOMEM  = failure.
2294*/
2295static int
2296qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2297        struct req_que **req, struct rsp_que **rsp)
2298{
2299        char    name[16];
2300
2301        ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
2302                &ha->init_cb_dma, GFP_KERNEL);
2303        if (!ha->init_cb)
2304                goto fail;
2305
2306        ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2307                &ha->gid_list_dma, GFP_KERNEL);
2308        if (!ha->gid_list)
2309                goto fail_free_init_cb;
2310
2311        ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2312        if (!ha->srb_mempool)
2313                goto fail_free_gid_list;
2314
2315        /* Get memory for cached NVRAM */
2316        ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2317        if (!ha->nvram)
2318                goto fail_free_srb_mempool;
2319
2320        snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2321                ha->pdev->device);
2322        ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2323                DMA_POOL_SIZE, 8, 0);
2324        if (!ha->s_dma_pool)
2325                goto fail_free_nvram;
2326
2327        /* Allocate memory for SNS commands */
2328        if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2329                /* Get consistent memory allocated for SNS commands */
2330                ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2331                        sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2332                if (!ha->sns_cmd)
2333                        goto fail_dma_pool;
2334        } else {
2335                /* Get consistent memory allocated for MS IOCB */
2336                ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2337                        &ha->ms_iocb_dma);
2338                if (!ha->ms_iocb)
2339                        goto fail_dma_pool;
2340                /* Get consistent memory allocated for CT SNS commands */
2341                ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2342                        sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2343                if (!ha->ct_sns)
2344                        goto fail_free_ms_iocb;
2345        }
2346
2347        /* Allocate memory for request ring */
2348        *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2349        if (!*req) {
2350                DEBUG(printk("Unable to allocate memory for req\n"));
2351                goto fail_req;
2352        }
2353        (*req)->length = req_len;
2354        (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2355                ((*req)->length + 1) * sizeof(request_t),
2356                &(*req)->dma, GFP_KERNEL);
2357        if (!(*req)->ring) {
2358                DEBUG(printk("Unable to allocate memory for req_ring\n"));
2359                goto fail_req_ring;
2360        }
2361        /* Allocate memory for response ring */
2362        *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2363        if (!*rsp) {
2364                qla_printk(KERN_WARNING, ha,
2365                        "Unable to allocate memory for rsp\n");
2366                goto fail_rsp;
2367        }
2368        (*rsp)->hw = ha;
2369        (*rsp)->length = rsp_len;
2370        (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2371                ((*rsp)->length + 1) * sizeof(response_t),
2372                &(*rsp)->dma, GFP_KERNEL);
2373        if (!(*rsp)->ring) {
2374                qla_printk(KERN_WARNING, ha,
2375                        "Unable to allocate memory for rsp_ring\n");
2376                goto fail_rsp_ring;
2377        }
2378        (*req)->rsp = *rsp;
2379        (*rsp)->req = *req;
2380        /* Allocate memory for NVRAM data for vports */
2381        if (ha->nvram_npiv_size) {
2382                ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2383                                        ha->nvram_npiv_size, GFP_KERNEL);
2384                if (!ha->npiv_info) {
2385                        qla_printk(KERN_WARNING, ha,
2386                                "Unable to allocate memory for npiv info\n");
2387                        goto fail_npiv_info;
2388                }
2389        } else
2390                ha->npiv_info = NULL;
2391
2392        /* Get consistent memory allocated for EX-INIT-CB. */
2393        if (IS_QLA81XX(ha)) {
2394                ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2395                    &ha->ex_init_cb_dma);
2396                if (!ha->ex_init_cb)
2397                        goto fail_ex_init_cb;
2398        }
2399
2400        INIT_LIST_HEAD(&ha->vp_list);
2401        return 1;
2402
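/*
 * Error unwind: each label frees the allocations that succeeded before the
 * failure, in reverse order of the allocations above.
 */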
2403fail_ex_init_cb:
2404        kfree(ha->npiv_info);
2405fail_npiv_info:
2406        dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2407                sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2408        (*rsp)->ring = NULL;
2409        (*rsp)->dma = 0;
2410fail_rsp_ring:
2411        kfree(*rsp);
2412fail_rsp:
2413        dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2414                sizeof(request_t), (*req)->ring, (*req)->dma);
2415        (*req)->ring = NULL;
2416        (*req)->dma = 0;
2417fail_req_ring:
2418        kfree(*req);
2419fail_req:
2420        dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2421                ha->ct_sns, ha->ct_sns_dma);
2422        ha->ct_sns = NULL;
2423        ha->ct_sns_dma = 0;
2424fail_free_ms_iocb:
2425        dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2426        ha->ms_iocb = NULL;
2427        ha->ms_iocb_dma = 0;
2428fail_dma_pool:
2429        dma_pool_destroy(ha->s_dma_pool);
2430        ha->s_dma_pool = NULL;
2431fail_free_nvram:
2432        kfree(ha->nvram);
2433        ha->nvram = NULL;
2434fail_free_srb_mempool:
2435        mempool_destroy(ha->srb_mempool);
2436        ha->srb_mempool = NULL;
2437fail_free_gid_list:
2438        dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2439        ha->gid_list_dma);
2440        ha->gid_list = NULL;
2441        ha->gid_list_dma = 0;
2442fail_free_init_cb:
2443        dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2444        ha->init_cb_dma);
2445        ha->init_cb = NULL;
2446        ha->init_cb_dma = 0;
2447fail:
2448        DEBUG(printk("%s: Memory allocation failure\n", __func__));
2449        return -ENOMEM;
2450}
2451
2452/*
2453* qla2x00_mem_free
2454*      Frees all adapter allocated memory.
2455*
2456* Input:
2457*      ha = adapter block pointer.
2458*/
2459static void
2460qla2x00_mem_free(struct qla_hw_data *ha)
2461{
2462        if (ha->srb_mempool)
2463                mempool_destroy(ha->srb_mempool);
2464
2465        if (ha->fce)
2466                dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2467                ha->fce_dma);
2468
2469        if (ha->fw_dump) {
2470                if (ha->eft)
2471                        dma_free_coherent(&ha->pdev->dev,
2472                        ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2473                vfree(ha->fw_dump);
2474        }
2475
2476        if (ha->dcbx_tlv)
2477                dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2478                    ha->dcbx_tlv, ha->dcbx_tlv_dma);
2479
2480        if (ha->xgmac_data)
2481                dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2482                    ha->xgmac_data, ha->xgmac_data_dma);
2483
2484        if (ha->sns_cmd)
2485                dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2486                ha->sns_cmd, ha->sns_cmd_dma);
2487
2488        if (ha->ct_sns)
2489                dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2490                ha->ct_sns, ha->ct_sns_dma);
2491
2492        if (ha->sfp_data)
2493                dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
2494
2495        if (ha->edc_data)
2496                dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
2497
2498        if (ha->ms_iocb)
2499                dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2500
2501        if (ha->ex_init_cb)
2502                dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
2503
2504        if (ha->s_dma_pool)
2505                dma_pool_destroy(ha->s_dma_pool);
2506
2507        if (ha->gid_list)
2508                dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2509                ha->gid_list_dma);
2510
2511        if (ha->init_cb)
2512                dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2513                ha->init_cb, ha->init_cb_dma);
2514        vfree(ha->optrom_buffer);
2515        kfree(ha->nvram);
2516        kfree(ha->npiv_info);
2517
2518        ha->srb_mempool = NULL;
2519        ha->eft = NULL;
2520        ha->eft_dma = 0;
2521        ha->sns_cmd = NULL;
2522        ha->sns_cmd_dma = 0;
2523        ha->ct_sns = NULL;
2524        ha->ct_sns_dma = 0;
2525        ha->ms_iocb = NULL;
2526        ha->ms_iocb_dma = 0;
2527        ha->init_cb = NULL;
2528        ha->init_cb_dma = 0;
2529        ha->ex_init_cb = NULL;
2530        ha->ex_init_cb_dma = 0;
2531
2532        ha->s_dma_pool = NULL;
2533
2534        ha->gid_list = NULL;
2535        ha->gid_list_dma = 0;
2536
2537        ha->fw_dump = NULL;
2538        ha->fw_dumped = 0;
2539        ha->fw_dump_reading = 0;
2540}
2541
2542struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2543                                                struct qla_hw_data *ha)
2544{
2545        struct Scsi_Host *host;
2546        struct scsi_qla_host *vha = NULL;
2547
2548        host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
2549        if (host == NULL) {
2550                printk(KERN_WARNING
2551                "qla2xxx: Couldn't allocate host from scsi layer!\n");
2552                goto fail;
2553        }
2554
2555        /* Clear our data area */
2556        vha = shost_priv(host);
2557        memset(vha, 0, sizeof(scsi_qla_host_t));
2558
2559        vha->host = host;
2560        vha->host_no = host->host_no;
2561        vha->hw = ha;
2562
2563        INIT_LIST_HEAD(&vha->vp_fcports);
2564        INIT_LIST_HEAD(&vha->work_list);
2565        INIT_LIST_HEAD(&vha->list);
2566
2567        spin_lock_init(&vha->work_lock);
2568
2569        sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2570        return vha;
2571
2572fail:
2573        return vha;
2574}
2575
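/*
 * Deferred work events: qla2x00_alloc_work() allocates a qla_work_evt with
 * GFP_ATOMIC, qla2x00_post_work() queues it on vha->work_list under
 * work_lock and wakes the DPC thread, and qla2x00_do_work() later drains
 * the list and dispatches on the event type.
 */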
2576static struct qla_work_evt *
2577qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2578{
2579        struct qla_work_evt *e;
2580
2581        e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
2582        if (!e)
2583                return NULL;
2584
2585        INIT_LIST_HEAD(&e->list);
2586        e->type = type;
2587        e->flags = QLA_EVT_FLAG_FREE;
2588        return e;
2589}
2590
2591static int
2592qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
2593{
2594        unsigned long flags;
2595
2596        spin_lock_irqsave(&vha->work_lock, flags);
2597        list_add_tail(&e->list, &vha->work_list);
2598        spin_unlock_irqrestore(&vha->work_lock, flags);
2599        qla2xxx_wake_dpc(vha);
2600
2601        return QLA_SUCCESS;
2602}
2603
2604int
2605qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2606    u32 data)
2607{
2608        struct qla_work_evt *e;
2609
2610        e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
2611        if (!e)
2612                return QLA_FUNCTION_FAILED;
2613
2614        e->u.aen.code = code;
2615        e->u.aen.data = data;
2616        return qla2x00_post_work(vha, e);
2617}
2618
2619int
2620qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
2621{
2622        struct qla_work_evt *e;
2623
2624        e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
2625        if (!e)
2626                return QLA_FUNCTION_FAILED;
2627
2628        memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
2629        return qla2x00_post_work(vha, e);
2630}
2631
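/*
 * qla2x00_post_async_work() stamps out one posting helper per asynchronous
 * login/logout event type; the four instantiations follow the macro.
 */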
2632#define qla2x00_post_async_work(name, type)     \
2633int qla2x00_post_async_##name##_work(           \
2634    struct scsi_qla_host *vha,                  \
2635    fc_port_t *fcport, uint16_t *data)          \
2636{                                               \
2637        struct qla_work_evt *e;                 \
2638                                                \
2639        e = qla2x00_alloc_work(vha, type);      \
2640        if (!e)                                 \
2641                return QLA_FUNCTION_FAILED;     \
2642                                                \
2643        e->u.logio.fcport = fcport;             \
2644        if (data) {                             \
2645                e->u.logio.data[0] = data[0];   \
2646                e->u.logio.data[1] = data[1];   \
2647        }                                       \
2648        return qla2x00_post_work(vha, e);       \
2649}
2650
2651qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
2652qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
2653qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
2654qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
2655
2656void
2657qla2x00_do_work(struct scsi_qla_host *vha)
2658{
2659        struct qla_work_evt *e, *tmp;
2660        unsigned long flags;
2661        LIST_HEAD(work);
2662
2663        spin_lock_irqsave(&vha->work_lock, flags);
2664        list_splice_init(&vha->work_list, &work);
2665        spin_unlock_irqrestore(&vha->work_lock, flags);
2666
2667        list_for_each_entry_safe(e, tmp, &work, list) {
2668                list_del_init(&e->list);
2669
2670                switch (e->type) {
2671                case QLA_EVT_AEN:
2672                        fc_host_post_event(vha->host, fc_get_event_number(),
2673                            e->u.aen.code, e->u.aen.data);
2674                        break;
2675                case QLA_EVT_IDC_ACK:
2676                        qla81xx_idc_ack(vha, e->u.idc_ack.mb);
2677                        break;
2678                case QLA_EVT_ASYNC_LOGIN:
2679                        qla2x00_async_login(vha, e->u.logio.fcport,
2680                            e->u.logio.data);
2681                        break;
2682                case QLA_EVT_ASYNC_LOGIN_DONE:
2683                        qla2x00_async_login_done(vha, e->u.logio.fcport,
2684                            e->u.logio.data);
2685                        break;
2686                case QLA_EVT_ASYNC_LOGOUT:
2687                        qla2x00_async_logout(vha, e->u.logio.fcport);
2688                        break;
2689                case QLA_EVT_ASYNC_LOGOUT_DONE:
2690                        qla2x00_async_logout_done(vha, e->u.logio.fcport,
2691                            e->u.logio.data);
2692                        break;
2693                }
2694                if (e->flags & QLA_EVT_FLAG_FREE)
2695                        kfree(e);
2696        }
2697}
2698
2699/* Retries login for all the fcports of a vport.
2700 * Context: DPC thread
2701 */
2702void qla2x00_relogin(struct scsi_qla_host *vha)
2703{
2704        fc_port_t       *fcport;
2705        int status;
2706        uint16_t        next_loopid = 0;
2707        struct qla_hw_data *ha = vha->hw;
2708        uint16_t data[2];
2709
2710        list_for_each_entry(fcport, &vha->vp_fcports, list) {
2711                /*
2712                 * If the port is not ONLINE, try to log in to it
2713                 * if we haven't run out of retries.
2714                 */
2715                if (atomic_read(&fcport->state) !=
2716                        FCS_ONLINE && fcport->login_retry) {
2717
2718                        fcport->login_retry--;
2719                        if (fcport->flags & FCF_FABRIC_DEVICE) {
2720                                if (fcport->flags & FCF_TAPE_PRESENT)
2721                                        ha->isp_ops->fabric_logout(vha,
2722                                                        fcport->loop_id,
2723                                                        fcport->d_id.b.domain,
2724                                                        fcport->d_id.b.area,
2725                                                        fcport->d_id.b.al_pa);
2726
2727                                if (IS_ALOGIO_CAPABLE(ha)) {
2728                                        data[0] = 0;
2729                                        data[1] = QLA_LOGIO_LOGIN_RETRIED;
2730                                        status = qla2x00_post_async_login_work(
2731                                            vha, fcport, data);
2732                                        if (status == QLA_SUCCESS)
2733                                                continue;
2734                                        /* Attempt a retry. */
2735                                        status = 1;
2736                                } else
2737                                        status = qla2x00_fabric_login(vha,
2738                                            fcport, &next_loopid);
2739                        } else
2740                                status = qla2x00_local_device_login(vha,
2741                                                                fcport);
2742
2743                        if (status == QLA_SUCCESS) {
2744                                fcport->old_loop_id = fcport->loop_id;
2745
2746                                DEBUG(printk("scsi(%ld): port login OK: logged "
2747                                "in ID 0x%x\n", vha->host_no, fcport->loop_id));
2748
2749                                qla2x00_update_fcport(vha, fcport);
2750
2751                        } else if (status == 1) {
2752                                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2753                                /* retry the login again */
2754                                DEBUG(printk("scsi(%ld): Retrying"
2755                                " %d login again loop_id 0x%x\n",
2756                                vha->host_no, fcport->login_retry,
2757                                                fcport->loop_id));
2758                        } else {
2759                                fcport->login_retry = 0;
2760                        }
2761
2762                        if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2763                                fcport->loop_id = FC_NO_LOOP_ID;
2764                }
2765                if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2766                        break;
2767        }
2768}
2769
2770/**************************************************************************
2771* qla2x00_do_dpc
2772*   This kernel thread performs the background (deferred) processing
2773*   scheduled by the interrupt handler.
2774*
2775* Notes:
2776* This task always runs in the context of a kernel thread.  It is
2777* kicked off by the driver's detect code, and one thread is started
2778* per adapter.  The thread immediately goes to sleep and waits for a
2779* fibre event.  When either the interrupt handler or the timer routine
2780* detects an event, it sets one of the dpc_flags task bits and then
2781* wakes the thread up.
2782**************************************************************************/
2783static int
2784qla2x00_do_dpc(void *data)
2785{
2786        int             rval;
2787        scsi_qla_host_t *base_vha;
2788        struct qla_hw_data *ha;
2789
2790        ha = (struct qla_hw_data *)data;
2791        base_vha = pci_get_drvdata(ha->pdev);
2792
2793        set_user_nice(current, -20);
2794
2795        while (!kthread_should_stop()) {
2796                DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
2797
2798                set_current_state(TASK_INTERRUPTIBLE);
2799                schedule();
2800                __set_current_state(TASK_RUNNING);
2801
2802                DEBUG3(printk("qla2x00: DPC handler waking up\n"));
2803
2804                /* Initialization not yet finished. Don't do anything yet. */
2805                if (!base_vha->flags.init_done)
2806                        continue;
2807
2808                DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
2809
2810                ha->dpc_active = 1;
2811
2812                if (ha->flags.mbox_busy) {
2813                        ha->dpc_active = 0;
2814                        continue;
2815                }
2816
2817                qla2x00_do_work(base_vha);
2818
2819                if (test_and_clear_bit(ISP_ABORT_NEEDED,
2820                                                &base_vha->dpc_flags)) {
2821
2822                        DEBUG(printk("scsi(%ld): dpc: sched "
2823                            "qla2x00_abort_isp ha = %p\n",
2824                            base_vha->host_no, ha));
2825                        if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
2826                            &base_vha->dpc_flags))) {
2827
2828                                if (qla2x00_abort_isp(base_vha)) {
2829                                        /* failed. retry later */
2830                                        set_bit(ISP_ABORT_NEEDED,
2831                                            &base_vha->dpc_flags);
2832                                }
2833                                clear_bit(ABORT_ISP_ACTIVE,
2834                                                &base_vha->dpc_flags);
2835                        }
2836
2837                        DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
2838                            base_vha->host_no));
2839                }
2840
2841                if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
2842                        qla2x00_update_fcports(base_vha);
2843                        clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2844                }
2845
2846                if (test_and_clear_bit(RESET_MARKER_NEEDED,
2847                                                        &base_vha->dpc_flags) &&
2848                    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
2849
2850                        DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
2851                            base_vha->host_no));
2852
2853                        qla2x00_rst_aen(base_vha);
2854                        clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
2855                }
2856
2857                /* Retry each device up to login retry count */
2858                if ((test_and_clear_bit(RELOGIN_NEEDED,
2859                                                &base_vha->dpc_flags)) &&
2860                    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
2861                    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
2862
2863                        DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
2864                                        base_vha->host_no));
2865                        qla2x00_relogin(base_vha);
2866
2867                        DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
2868                            base_vha->host_no));
2869                }
2870
2871                if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
2872                                                        &base_vha->dpc_flags)) {
2873
2874                        DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
2875                                base_vha->host_no));
2876
2877                        if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
2878                            &base_vha->dpc_flags))) {
2879
2880                                rval = qla2x00_loop_resync(base_vha);
2881
2882                                clear_bit(LOOP_RESYNC_ACTIVE,
2883                                                &base_vha->dpc_flags);
2884                        }
2885
2886                        DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
2887                            base_vha->host_no));
2888                }
2889
2890                if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
2891                    atomic_read(&base_vha->loop_state) == LOOP_READY) {
2892                        clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
2893                        qla2xxx_flash_npiv_conf(base_vha);
2894                }
2895
2896                if (!ha->interrupts_on)
2897                        ha->isp_ops->enable_intrs(ha);
2898
2899                if (test_and_clear_bit(BEACON_BLINK_NEEDED,
2900                                        &base_vha->dpc_flags))
2901                        ha->isp_ops->beacon_blink(base_vha);
2902
2903                qla2x00_do_dpc_all_vps(base_vha);
2904
2905                ha->dpc_active = 0;
2906        } /* End of while (!kthread_should_stop()) */
2907
2908        DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
2909
2910        /*
2911         * Make sure that nobody tries to wake us up again.
2912         */
2913        ha->dpc_active = 0;
2914
2915        /* Cleanup any residual CTX SRBs. */
2916        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
2917
2918        return 0;
2919}
2920
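    /*
     * Wake the DPC kernel thread so it re-evaluates dpc_flags.  Callers are
     * expected to set the relevant flag bit first; the wake-up is skipped
     * once UNLOADING is set so the thread can exit cleanly.
     */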
2921void
2922qla2xxx_wake_dpc(struct scsi_qla_host *vha)
2923{
2924        struct qla_hw_data *ha = vha->hw;
2925        struct task_struct *t = ha->dpc_thread;
2926
2927        if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
2928                wake_up_process(t);
2929}
2930
2931/*
2932*  qla2x00_rst_aen
2933*      Processes asynchronous reset.
2934*
2935* Input:
2936*      vha = adapter block pointer.
2937*/
2938static void
2939qla2x00_rst_aen(scsi_qla_host_t *vha)
2940{
2941        if (vha->flags.online && !vha->flags.reset_active &&
2942            !atomic_read(&vha->loop_down_timer) &&
2943            !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
2944                do {
2945                        clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2946
2947                        /*
2948                         * Issue marker command only when we are going to start
2949                         * the I/O.
2950                         */
2951                        vha->marker_needed = 1;
2952                } while (!atomic_read(&vha->loop_down_timer) &&
2953                    (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
2954        }
2955}
2956
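    /*
     * SRB completion path: qla2x00_sp_compl() unmaps any DMA still mapped
     * for the command, returns the srb_t to the per-HBA mempool and then
     * hands the request back to the SCSI midlayer via ->scsi_done().
     */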
2957static void
2958qla2x00_sp_free_dma(srb_t *sp)
2959{
2960        struct scsi_cmnd *cmd = sp->cmd;
2961
2962        if (sp->flags & SRB_DMA_VALID) {
2963                scsi_dma_unmap(cmd);
2964                sp->flags &= ~SRB_DMA_VALID;
2965        }
2966        CMD_SP(cmd) = NULL;
2967}
2968
2969void
2970qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
2971{
2972        struct scsi_cmnd *cmd = sp->cmd;
2973
2974        qla2x00_sp_free_dma(sp);
2975
2976        mempool_free(sp, ha->srb_mempool);
2977
2978        cmd->scsi_done(cmd);
2979}
2980
2981/**************************************************************************
2982*   qla2x00_timer
2983*
2984* Description:
2985*   One-second housekeeping timer: ages port/loop down timers and schedules DPC work
2986*
2987* Context: Interrupt
2988***************************************************************************/
2989void
2990qla2x00_timer(scsi_qla_host_t *vha)
2991{
2992        unsigned long   cpu_flags = 0;
2993        fc_port_t       *fcport;
2994        int             start_dpc = 0;
2995        int             index;
2996        srb_t           *sp;
2997        int             t;
2998        struct qla_hw_data *ha = vha->hw;
2999        struct req_que *req;
3000        /*
3001         * Ports - Port down timer.
3002         *
3003         * Whenever a port is in the LOST state we start decrementing its
3004         * port down timer every second until it reaches zero. Once it
3005         * reaches zero, the port is marked DEAD.
3006         */
3007        t = 0;
3008        list_for_each_entry(fcport, &vha->vp_fcports, list) {
3009                if (fcport->port_type != FCT_TARGET)
3010                        continue;
3011
3012                if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
3013
3014                        if (atomic_read(&fcport->port_down_timer) == 0)
3015                                continue;
3016
3017                        if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
3018                                atomic_set(&fcport->state, FCS_DEVICE_DEAD);
3019
3020                        DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
3021                            "%d remaining\n",
3022                            vha->host_no,
3023                            t, atomic_read(&fcport->port_down_timer)));
3024                }
3025                t++;
3026        } /* End of for fcport  */
3027
3028
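            /*
             * Loop-down handling is staged: when the count-down timer hits
             * the loop_down_abort_time threshold the loop may be marked
             * LOOP_DEAD and an ISP abort is requested if any FC-Tape device
             * still has commands outstanding; when the timer finally
             * reaches zero the adapter is reinitialized via
             * ISP_ABORT_NEEDED.
             */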
3029        /* Loop down handler. */
3030        if (atomic_read(&vha->loop_down_timer) > 0 &&
3031            !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3032                && vha->flags.online) {
3033
3034                if (atomic_read(&vha->loop_down_timer) ==
3035                    vha->loop_down_abort_time) {
3036
3037                        DEBUG(printk("scsi(%ld): Loop Down - aborting the "
3038                            "queues before time expires\n",
3039                            vha->host_no));
3040
3041                        if (!IS_QLA2100(ha) && vha->link_down_timeout)
3042                                atomic_set(&vha->loop_state, LOOP_DEAD);
3043
3044                        /* Schedule an ISP abort to return any tape commands. */
3045                        /* NPIV - scan physical port only */
3046                        if (!vha->vp_idx) {
3047                                spin_lock_irqsave(&ha->hardware_lock,
3048                                    cpu_flags);
3049                                req = ha->req_q_map[0];
3050                                for (index = 1;
3051                                    index < MAX_OUTSTANDING_COMMANDS;
3052                                    index++) {
3053                                        fc_port_t *sfcp;
3054
3055                                        sp = req->outstanding_cmds[index];
3056                                        if (!sp)
3057                                                continue;
3058                                        if (sp->ctx)
3059                                                continue;
3060                                        sfcp = sp->fcport;
3061                                        if (!(sfcp->flags & FCF_TAPE_PRESENT))
3062                                                continue;
3063
3064                                        set_bit(ISP_ABORT_NEEDED,
3065                                                        &vha->dpc_flags);
3066                                        break;
3067                                }
3068                                spin_unlock_irqrestore(&ha->hardware_lock,
3069                                                                cpu_flags);
3070                        }
3071                        start_dpc++;
3072                }
3073
3074                /* if the loop has been down for 4 minutes, reinit adapter */
3075                if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
3076                        if (!(vha->device_flags & DFLG_NO_CABLE)) {
3077                                DEBUG(printk("scsi(%ld): Loop down - "
3078                                    "aborting ISP.\n",
3079                                    vha->host_no));
3080                                qla_printk(KERN_WARNING, ha,
3081                                    "Loop down - aborting ISP.\n");
3082
3083                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3084                        }
3085                }
3086                DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
3087                    vha->host_no,
3088                    atomic_read(&vha->loop_down_timer)));
3089        }
3090
3091        /* Check if beacon LED needs to be blinked */
3092        if (ha->beacon_blink_led == 1) {
3093                set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
3094                start_dpc++;
3095        }
3096
3097        /* Process any deferred work. */
3098        if (!list_empty(&vha->work_list))
3099                start_dpc++;
3100
3101        /* Schedule the DPC routine if needed */
3102        if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
3103            test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
3104            test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
3105            start_dpc ||
3106            test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
3107            test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
3108            test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3109            test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
3110                qla2xxx_wake_dpc(vha);
3111
3112        qla2x00_restart_timer(vha, WATCH_INTERVAL);
3113}
3114
3115/* Firmware interface routines. */
3116
3117#define FW_BLOBS        7
3118#define FW_ISP21XX      0
3119#define FW_ISP22XX      1
3120#define FW_ISP2300      2
3121#define FW_ISP2322      3
3122#define FW_ISP24XX      4
3123#define FW_ISP25XX      5
3124#define FW_ISP81XX      6
3125
3126#define FW_FILE_ISP21XX "ql2100_fw.bin"
3127#define FW_FILE_ISP22XX "ql2200_fw.bin"
3128#define FW_FILE_ISP2300 "ql2300_fw.bin"
3129#define FW_FILE_ISP2322 "ql2322_fw.bin"
3130#define FW_FILE_ISP24XX "ql2400_fw.bin"
3131#define FW_FILE_ISP25XX "ql2500_fw.bin"
3132#define FW_FILE_ISP81XX "ql8100_fw.bin"
3133
3134static DEFINE_MUTEX(qla_fw_lock);
3135
3136static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
3137        { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
3138        { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
3139        { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
3140        { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
3141        { .name = FW_FILE_ISP24XX, },
3142        { .name = FW_FILE_ISP25XX, },
3143        { .name = FW_FILE_ISP81XX, },
3144};
3145
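    /*
     * Map the ISP type to an entry in qla_fw_blobs[] and load the image
     * through the kernel firmware loader the first time it is needed.  The
     * result is cached in blob->fw under qla_fw_lock and only released at
     * module unload by qla2x00_release_firmware().
     */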
3146struct fw_blob *
3147qla2x00_request_firmware(scsi_qla_host_t *vha)
3148{
3149        struct qla_hw_data *ha = vha->hw;
3150        struct fw_blob *blob;
3151
3152        blob = NULL;
3153        if (IS_QLA2100(ha)) {
3154                blob = &qla_fw_blobs[FW_ISP21XX];
3155        } else if (IS_QLA2200(ha)) {
3156                blob = &qla_fw_blobs[FW_ISP22XX];
3157        } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3158                blob = &qla_fw_blobs[FW_ISP2300];
3159        } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
3160                blob = &qla_fw_blobs[FW_ISP2322];
3161        } else if (IS_QLA24XX_TYPE(ha)) {
3162                blob = &qla_fw_blobs[FW_ISP24XX];
3163        } else if (IS_QLA25XX(ha)) {
3164                blob = &qla_fw_blobs[FW_ISP25XX];
3165        } else if (IS_QLA81XX(ha)) {
3166                blob = &qla_fw_blobs[FW_ISP81XX];
3167        }
3168
            /* Unrecognized ISP type; there is no firmware image to request. */
            if (!blob)
                    return NULL;

3169        mutex_lock(&qla_fw_lock);
3170        if (blob->fw)
3171                goto out;
3172
3173        if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3174                DEBUG2(printk("scsi(%ld): Failed to load firmware image "
3175                    "(%s).\n", vha->host_no, blob->name));
3176                blob->fw = NULL;
3177                blob = NULL;
3178                goto out;
3179        }
3180
3181out:
3182        mutex_unlock(&qla_fw_lock);
3183        return blob;
3184}
3185
3186static void
3187qla2x00_release_firmware(void)
3188{
3189        int idx;
3190
3191        mutex_lock(&qla_fw_lock);
3192        for (idx = 0; idx < FW_BLOBS; idx++)
3193                if (qla_fw_blobs[idx].fw)
3194                        release_firmware(qla_fw_blobs[idx].fw);
3195        mutex_unlock(&qla_fw_lock);
3196}
3197
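    /*
     * PCI AER (Advanced Error Reporting) hooks.  The PCI core drives the
     * standard recovery sequence: ->error_detected() reports whether the
     * device can recover, ->mmio_enabled() inspects the RISC state (and
     * dumps firmware if it is paused), ->slot_reset() re-enables the
     * device and re-runs the ISP abort, and ->resume() waits for the HBA
     * to come back online before clearing the AER error status.
     */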
3198static pci_ers_result_t
3199qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3200{
3201        scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3202
3203        switch (state) {
3204        case pci_channel_io_normal:
3205                return PCI_ERS_RESULT_CAN_RECOVER;
3206        case pci_channel_io_frozen:
3207                pci_disable_device(pdev);
3208                return PCI_ERS_RESULT_NEED_RESET;
3209        case pci_channel_io_perm_failure:
3210                qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
3211                return PCI_ERS_RESULT_DISCONNECT;
3212        }
3213        return PCI_ERS_RESULT_NEED_RESET;
3214}
3215
3216static pci_ers_result_t
3217qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3218{
3219        int risc_paused = 0;
3220        uint32_t stat;
3221        unsigned long flags;
3222        scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3223        struct qla_hw_data *ha = base_vha->hw;
3224        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3225        struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
3226
3227        spin_lock_irqsave(&ha->hardware_lock, flags);
3228        if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3229                stat = RD_REG_DWORD(&reg->hccr);
3230                if (stat & HCCR_RISC_PAUSE)
3231                        risc_paused = 1;
3232        } else if (IS_QLA23XX(ha)) {
3233                stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
3234                if (stat & HSR_RISC_PAUSED)
3235                        risc_paused = 1;
3236        } else if (IS_FWI2_CAPABLE(ha)) {
3237                stat = RD_REG_DWORD(&reg24->host_status);
3238                if (stat & HSRX_RISC_PAUSED)
3239                        risc_paused = 1;
3240        }
3241        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3242
3243        if (risc_paused) {
3244                qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
3245                    "Dumping firmware!\n");
3246                ha->isp_ops->fw_dump(base_vha, 0);
3247
3248                return PCI_ERS_RESULT_NEED_RESET;
3249        } else
3250                return PCI_ERS_RESULT_RECOVERED;
3251}
3252
3253static pci_ers_result_t
3254qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3255{
3256        pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
3257        scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3258        struct qla_hw_data *ha = base_vha->hw;
3259        int rc;
3260
3261        if (ha->mem_only)
3262                rc = pci_enable_device_mem(pdev);
3263        else
3264                rc = pci_enable_device(pdev);
3265
3266        if (rc) {
3267                qla_printk(KERN_WARNING, ha,
3268                    "Can't re-enable PCI device after reset.\n");
3269
3270                return ret;
3271        }
3272        pci_set_master(pdev);
3273
3274        if (ha->isp_ops->pci_config(base_vha))
3275                return ret;
3276
3277        set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3278        if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
3279                ret = PCI_ERS_RESULT_RECOVERED;
3280        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3281
3282        return ret;
3283}
3284
3285static void
3286qla2xxx_pci_resume(struct pci_dev *pdev)
3287{
3288        scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3289        struct qla_hw_data *ha = base_vha->hw;
3290        int ret;
3291
3292        ret = qla2x00_wait_for_hba_online(base_vha);
3293        if (ret != QLA_SUCCESS) {
3294                qla_printk(KERN_ERR, ha,
3295                    "The device failed to resume I/O "
3296                    "from slot/link_reset.\n");
3297        }
3298        pci_cleanup_aer_uncorrect_error_status(pdev);
3299}
3300
3301static struct pci_error_handlers qla2xxx_err_handler = {
3302        .error_detected = qla2xxx_pci_error_detected,
3303        .mmio_enabled = qla2xxx_pci_mmio_enabled,
3304        .slot_reset = qla2xxx_pci_slot_reset,
3305        .resume = qla2xxx_pci_resume,
3306};
3307
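    /*
     * PCI IDs claimed by this driver.  MODULE_DEVICE_TABLE() exports the
     * list in the module image so userspace (udev/modprobe) can autoload
     * qla2xxx when a matching HBA is discovered.
     */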
3308static struct pci_device_id qla2xxx_pci_tbl[] = {
3309        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
3310        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
3311        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
3312        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
3313        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
3314        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
3315        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
3316        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
3317        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
3318        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
3319        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
3320        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
3321        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
3322        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
3323        { 0 },
3324};
3325MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
3326
3327static struct pci_driver qla2xxx_pci_driver = {
3328        .name           = QLA2XXX_DRIVER_NAME,
3329        .driver         = {
3330                .owner          = THIS_MODULE,
3331        },
3332        .id_table       = qla2xxx_pci_tbl,
3333        .probe          = qla2x00_probe_one,
3334        .remove         = qla2x00_remove_one,
3335        .err_handler    = &qla2xxx_err_handler,
3336};
3337
3338/**
3339 * qla2x00_module_init - Module initialization.
3340 **/
3341static int __init
3342qla2x00_module_init(void)
3343{
3344        int ret = 0;
3345
3346        /* Allocate cache for SRBs. */
3347        srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
3348            SLAB_HWCACHE_ALIGN, NULL);
3349        if (srb_cachep == NULL) {
3350                printk(KERN_ERR
3351                    "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
3352                return -ENOMEM;
3353        }
3354
3355        /* Derive version string. */
3356        strcpy(qla2x00_version_str, QLA2XXX_VERSION);
3357        if (ql2xextended_error_logging)
3358                strcat(qla2x00_version_str, "-debug");
3359
3360        qla2xxx_transport_template =
3361            fc_attach_transport(&qla2xxx_transport_functions);
3362        if (!qla2xxx_transport_template) {
3363                kmem_cache_destroy(srb_cachep);
3364                return -ENODEV;
3365        }
3366        qla2xxx_transport_vport_template =
3367            fc_attach_transport(&qla2xxx_transport_vport_functions);
3368        if (!qla2xxx_transport_vport_template) {
3369                kmem_cache_destroy(srb_cachep);
3370                fc_release_transport(qla2xxx_transport_template);
3371                return -ENODEV;
3372        }
3373
3374        printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
3375            qla2x00_version_str);
3376        ret = pci_register_driver(&qla2xxx_pci_driver);
3377        if (ret) {
3378                kmem_cache_destroy(srb_cachep);
3379                fc_release_transport(qla2xxx_transport_template);
3380                fc_release_transport(qla2xxx_transport_vport_template);
3381        }
3382        return ret;
3383}
3384
3385/**
3386 * qla2x00_module_exit - Module cleanup.
3387 **/
3388static void __exit
3389qla2x00_module_exit(void)
3390{
3391        pci_unregister_driver(&qla2xxx_pci_driver);
3392        qla2x00_release_firmware();
3393        kmem_cache_destroy(srb_cachep);
3394        fc_release_transport(qla2xxx_transport_template);
3395        fc_release_transport(qla2xxx_transport_vport_template);
3396}
3397
3398module_init(qla2x00_module_init);
3399module_exit(qla2x00_module_exit);
3400
3401MODULE_AUTHOR("QLogic Corporation");
3402MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
3403MODULE_LICENSE("GPL");
3404MODULE_VERSION(QLA2XXX_VERSION);
3405MODULE_FIRMWARE(FW_FILE_ISP21XX);
3406MODULE_FIRMWARE(FW_FILE_ISP22XX);
3407MODULE_FIRMWARE(FW_FILE_ISP2300);
3408MODULE_FIRMWARE(FW_FILE_ISP2322);
3409MODULE_FIRMWARE(FW_FILE_ISP24XX);
3410MODULE_FIRMWARE(FW_FILE_ISP25XX);
3411MODULE_FIRMWARE(FW_FILE_ISP81XX);
3412