linux/drivers/scsi/qla2xxx/qla_init.c
   1/*
   2 * QLogic Fibre Channel HBA Driver
   3 * Copyright (c)  2003-2013 QLogic Corporation
   4 *
   5 * See LICENSE.qla2xxx for copyright and licensing details.
   6 */
   7#include "qla_def.h"
   8#include "qla_gbl.h"
   9
  10#include <linux/delay.h>
  11#include <linux/slab.h>
  12#include <linux/vmalloc.h>
  13
  14#include "qla_devtbl.h"
  15
  16#ifdef CONFIG_SPARC
  17#include <asm/prom.h>
  18#endif
  19
  20#include <target/target_core_base.h>
  21#include "qla_target.h"
  22
  23/*
  24*  QLogic ISP2x00 Hardware Support Function Prototypes.
  25*/
  26static int qla2x00_isp_firmware(scsi_qla_host_t *);
  27static int qla2x00_setup_chip(scsi_qla_host_t *);
  28static int qla2x00_fw_ready(scsi_qla_host_t *);
  29static int qla2x00_configure_hba(scsi_qla_host_t *);
  30static int qla2x00_configure_loop(scsi_qla_host_t *);
  31static int qla2x00_configure_local_loop(scsi_qla_host_t *);
  32static int qla2x00_configure_fabric(scsi_qla_host_t *);
  33static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
  34static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
  35    uint16_t *);
  36
  37static int qla2x00_restart_isp(scsi_qla_host_t *);
  38
  39static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
  40static int qla84xx_init_chip(scsi_qla_host_t *);
  41static int qla25xx_init_queues(struct qla_hw_data *);
  42
  43/* SRB Extensions ---------------------------------------------------------- */
  44
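/*
 * Timer callback for an expired SRB: under hardware_lock, clear the
 * outstanding-command slot, invoke the IOCB-specific timeout handler, and
 * release the SRB.
 */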
  45void
  46qla2x00_sp_timeout(unsigned long __data)
  47{
  48        srb_t *sp = (srb_t *)__data;
  49        struct srb_iocb *iocb;
  50        fc_port_t *fcport = sp->fcport;
  51        struct qla_hw_data *ha = fcport->vha->hw;
  52        struct req_que *req;
  53        unsigned long flags;
  54
  55        spin_lock_irqsave(&ha->hardware_lock, flags);
  56        req = ha->req_q_map[0];
  57        req->outstanding_cmds[sp->handle] = NULL;
  58        iocb = &sp->u.iocb_cmd;
  59        iocb->timeout(sp);
  60        sp->free(fcport->vha, sp);
  61        spin_unlock_irqrestore(&ha->hardware_lock, flags);
  62}
  63
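/* Default SRB release: stop the IOCB timer and return the SRB to the pool. */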
  64void
  65qla2x00_sp_free(void *data, void *ptr)
  66{
  67        srb_t *sp = (srb_t *)ptr;
  68        struct srb_iocb *iocb = &sp->u.iocb_cmd;
  69        struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
  70
  71        del_timer(&iocb->timer);
  72        qla2x00_rel_sp(vha, sp);
  73}
  74
  75/* Asynchronous Login/Logout Routines -------------------------------------- */
  76
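/*
 * Return the timeout used for asynchronous logio/TM IOCBs: twice the
 * switch-negotiated R_A_TOV, with overrides for ISPFx00 and for pre-FWI2
 * adapters, where the login timeout from the init control block is used.
 */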
  77unsigned long
  78qla2x00_get_async_timeout(struct scsi_qla_host *vha)
  79{
  80        unsigned long tmo;
  81        struct qla_hw_data *ha = vha->hw;
  82
  83        /* Firmware should use switch negotiated r_a_tov for timeout. */
  84        tmo = ha->r_a_tov / 10 * 2;
  85        if (IS_QLAFX00(ha)) {
  86                tmo = FX00_DEF_RATOV * 2;
  87        } else if (!IS_FWI2_CAPABLE(ha)) {
  88                /*
  89                 * Except for earlier ISPs where the timeout is seeded from the
  90                 * initialization control block.
  91                 */
  92                tmo = ha->login_timeout;
  93        }
  94        return tmo;
  95}
  96
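/*
 * Timeout handler shared by the async logio IOCBs.  A timed-out login is
 * followed by an explicit logout and reported back as MBS_COMMAND_ERROR
 * (flagged as retried if the original request asked for retries).
 */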
  97static void
  98qla2x00_async_iocb_timeout(void *data)
  99{
 100        srb_t *sp = (srb_t *)data;
 101        fc_port_t *fcport = sp->fcport;
 102
 103        ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
 104            "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
 105            sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
 106            fcport->d_id.b.al_pa);
 107
 108        fcport->flags &= ~FCF_ASYNC_SENT;
 109        if (sp->type == SRB_LOGIN_CMD) {
 110                struct srb_iocb *lio = &sp->u.iocb_cmd;
 111                qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
 112                /* Retry as needed. */
 113                lio->u.logio.data[0] = MBS_COMMAND_ERROR;
 114                lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
 115                        QLA_LOGIO_LOGIN_RETRIED : 0;
 116                qla2x00_post_async_login_done_work(fcport->vha, fcport,
 117                        lio->u.logio.data);
 118        }
 119}
 120
 121static void
 122qla2x00_async_login_sp_done(void *data, void *ptr, int res)
 123{
 124        srb_t *sp = (srb_t *)ptr;
 125        struct srb_iocb *lio = &sp->u.iocb_cmd;
 126        struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
 127
 128        if (!test_bit(UNLOADING, &vha->dpc_flags))
 129                qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
 130                    lio->u.logio.data);
 131        sp->free(sp->fcport->vha, sp);
 132}
 133
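/*
 * Issue a PLOGI to @fcport as an asynchronous IOCB.  The SRB carries its own
 * timer (async timeout + 2) and the completion is handed back to the DPC
 * thread via qla2x00_post_async_login_done_work().  The logout and ADISC
 * helpers below follow the same pattern.
 */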
 134int
 135qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 136    uint16_t *data)
 137{
 138        srb_t *sp;
 139        struct srb_iocb *lio;
 140        int rval;
 141
 142        rval = QLA_FUNCTION_FAILED;
 143        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 144        if (!sp)
 145                goto done;
 146
 147        sp->type = SRB_LOGIN_CMD;
 148        sp->name = "login";
 149        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 150
 151        lio = &sp->u.iocb_cmd;
 152        lio->timeout = qla2x00_async_iocb_timeout;
 153        sp->done = qla2x00_async_login_sp_done;
 154        lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
 155        if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 156                lio->u.logio.flags |= SRB_LOGIN_RETRIED;
 157        rval = qla2x00_start_sp(sp);
 158        if (rval != QLA_SUCCESS)
 159                goto done_free_sp;
 160
 161        ql_dbg(ql_dbg_disc, vha, 0x2072,
 162            "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
 163            "retries=%d.\n", sp->handle, fcport->loop_id,
 164            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
 165            fcport->login_retry);
 166        return rval;
 167
 168done_free_sp:
 169        sp->free(fcport->vha, sp);
 170done:
 171        return rval;
 172}
 173
 174static void
 175qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
 176{
 177        srb_t *sp = (srb_t *)ptr;
 178        struct srb_iocb *lio = &sp->u.iocb_cmd;
 179        struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
 180
 181        if (!test_bit(UNLOADING, &vha->dpc_flags))
 182                qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
 183                    lio->u.logio.data);
 184        sp->free(sp->fcport->vha, sp);
 185}
 186
 187int
 188qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 189{
 190        srb_t *sp;
 191        struct srb_iocb *lio;
 192        int rval;
 193
 194        rval = QLA_FUNCTION_FAILED;
 195        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 196        if (!sp)
 197                goto done;
 198
 199        sp->type = SRB_LOGOUT_CMD;
 200        sp->name = "logout";
 201        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 202
 203        lio = &sp->u.iocb_cmd;
 204        lio->timeout = qla2x00_async_iocb_timeout;
 205        sp->done = qla2x00_async_logout_sp_done;
 206        rval = qla2x00_start_sp(sp);
 207        if (rval != QLA_SUCCESS)
 208                goto done_free_sp;
 209
 210        ql_dbg(ql_dbg_disc, vha, 0x2070,
 211            "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
 212            sp->handle, fcport->loop_id, fcport->d_id.b.domain,
 213            fcport->d_id.b.area, fcport->d_id.b.al_pa);
 214        return rval;
 215
 216done_free_sp:
 217        sp->free(fcport->vha, sp);
 218done:
 219        return rval;
 220}
 221
 222static void
 223qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
 224{
 225        srb_t *sp = (srb_t *)ptr;
 226        struct srb_iocb *lio = &sp->u.iocb_cmd;
 227        struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
 228
 229        if (!test_bit(UNLOADING, &vha->dpc_flags))
 230                qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
 231                    lio->u.logio.data);
 232        sp->free(sp->fcport->vha, sp);
 233}
 234
 235int
 236qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
 237    uint16_t *data)
 238{
 239        srb_t *sp;
 240        struct srb_iocb *lio;
 241        int rval;
 242
 243        rval = QLA_FUNCTION_FAILED;
 244        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 245        if (!sp)
 246                goto done;
 247
 248        sp->type = SRB_ADISC_CMD;
 249        sp->name = "adisc";
 250        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 251
 252        lio = &sp->u.iocb_cmd;
 253        lio->timeout = qla2x00_async_iocb_timeout;
 254        sp->done = qla2x00_async_adisc_sp_done;
 255        if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 256                lio->u.logio.flags |= SRB_LOGIN_RETRIED;
 257        rval = qla2x00_start_sp(sp);
 258        if (rval != QLA_SUCCESS)
 259                goto done_free_sp;
 260
 261        ql_dbg(ql_dbg_disc, vha, 0x206f,
 262            "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
 263            sp->handle, fcport->loop_id, fcport->d_id.b.domain,
 264            fcport->d_id.b.area, fcport->d_id.b.al_pa);
 265        return rval;
 266
 267done_free_sp:
 268        sp->free(fcport->vha, sp);
 269done:
 270        return rval;
 271}
 272
 273static void
 274qla2x00_tmf_iocb_timeout(void *data)
 275{
 276        srb_t *sp = (srb_t *)data;
 277        struct srb_iocb *tmf = &sp->u.iocb_cmd;
 278
 279        tmf->u.tmf.comp_status = CS_TIMEOUT;
 280        complete(&tmf->u.tmf.comp);
 281}
 282
 283static void
 284qla2x00_tmf_sp_done(void *data, void *ptr, int res)
 285{
 286        srb_t *sp = (srb_t *)ptr;
 287        struct srb_iocb *tmf = &sp->u.iocb_cmd;
 288        complete(&tmf->u.tmf.comp);
 289}
 290
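/*
 * Issue a task-management IOCB (e.g. TCF_LUN_RESET) and wait synchronously
 * for its completion, then send a marker IOCB to resynchronize the firmware
 * for the affected ID/LUN.  Illustrative use from an error-handling path,
 * a sketch only and not code taken from this driver:
 *
 *	rval = qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, lun, tag);
 *	if (rval != QLA_SUCCESS)
 *		... escalate, e.g. to a target or adapter reset ...
 */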
 291int
 292qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 293        uint32_t tag)
 294{
 295        struct scsi_qla_host *vha = fcport->vha;
 296        struct srb_iocb *tm_iocb;
 297        srb_t *sp;
 298        int rval = QLA_FUNCTION_FAILED;
 299
 300        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 301        if (!sp)
 302                goto done;
 303
 304        tm_iocb = &sp->u.iocb_cmd;
 305        sp->type = SRB_TM_CMD;
 306        sp->name = "tmf";
 307        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
 308        tm_iocb->u.tmf.flags = flags;
 309        tm_iocb->u.tmf.lun = lun;
 310        tm_iocb->u.tmf.data = tag;
 311        sp->done = qla2x00_tmf_sp_done;
 312        tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
 313        init_completion(&tm_iocb->u.tmf.comp);
 314
 315        rval = qla2x00_start_sp(sp);
 316        if (rval != QLA_SUCCESS)
 317                goto done_free_sp;
 318
 319        ql_dbg(ql_dbg_taskm, vha, 0x802f,
 320            "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
 321            sp->handle, fcport->loop_id, fcport->d_id.b.domain,
 322            fcport->d_id.b.area, fcport->d_id.b.al_pa);
 323
 324        wait_for_completion(&tm_iocb->u.tmf.comp);
 325
 326        rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
 327            QLA_SUCCESS : QLA_FUNCTION_FAILED;
 328
 329        if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
 330                ql_dbg(ql_dbg_taskm, vha, 0x8030,
 331                    "TM IOCB failed (%x).\n", rval);
 332        }
 333
 334        if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
 335                flags = tm_iocb->u.tmf.flags;
 336                lun = (uint16_t)tm_iocb->u.tmf.lun;
 337
 338                /* Issue Marker IOCB */
 339                qla2x00_marker(vha, vha->hw->req_q_map[0],
 340                    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
 341                    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
 342        }
 343
 344done_free_sp:
 345        sp->free(vha, sp);
 346done:
 347        return rval;
 348}
 349
 350static void
 351qla24xx_abort_iocb_timeout(void *data)
 352{
 353        srb_t *sp = (srb_t *)data;
 354        struct srb_iocb *abt = &sp->u.iocb_cmd;
 355
 356        abt->u.abt.comp_status = CS_TIMEOUT;
 357        complete(&abt->u.abt.comp);
 358}
 359
 360static void
 361qla24xx_abort_sp_done(void *data, void *ptr, int res)
 362{
 363        srb_t *sp = (srb_t *)ptr;
 364        struct srb_iocb *abt = &sp->u.iocb_cmd;
 365
 366        complete(&abt->u.abt.comp);
 367}
 368
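/*
 * Build an SRB_ABT_CMD IOCB for @cmd_sp and wait for its completion; only a
 * CS_COMPLETE completion status counts as success.
 */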
 369static int
 370qla24xx_async_abort_cmd(srb_t *cmd_sp)
 371{
 372        scsi_qla_host_t *vha = cmd_sp->fcport->vha;
 373        fc_port_t *fcport = cmd_sp->fcport;
 374        struct srb_iocb *abt_iocb;
 375        srb_t *sp;
 376        int rval = QLA_FUNCTION_FAILED;
 377
 378        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 379        if (!sp)
 380                goto done;
 381
 382        abt_iocb = &sp->u.iocb_cmd;
 383        sp->type = SRB_ABT_CMD;
 384        sp->name = "abort";
 385        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
 386        abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
 387        sp->done = qla24xx_abort_sp_done;
 388        abt_iocb->timeout = qla24xx_abort_iocb_timeout;
 389        init_completion(&abt_iocb->u.abt.comp);
 390
 391        rval = qla2x00_start_sp(sp);
 392        if (rval != QLA_SUCCESS)
 393                goto done_free_sp;
 394
 395        ql_dbg(ql_dbg_async, vha, 0x507c,
 396            "Abort command issued - hdl=%x, target_id=%x\n",
 397            cmd_sp->handle, fcport->tgt_id);
 398
 399        wait_for_completion(&abt_iocb->u.abt.comp);
 400
 401        rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
 402            QLA_SUCCESS : QLA_FUNCTION_FAILED;
 403
 404done_free_sp:
 405        sp->free(vha, sp);
 406done:
 407        return rval;
 408}
 409
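/*
 * Abort a previously issued SRB.  The handle is validated against the
 * request queue's outstanding_cmds[] under hardware_lock before the abort
 * IOCB (or, for SRB_FXIOCB_DCMD requests, an FXDISC abort) is issued.
 */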
 410int
 411qla24xx_async_abort_command(srb_t *sp)
 412{
 413        unsigned long   flags = 0;
 414
 415        uint32_t        handle;
 416        fc_port_t       *fcport = sp->fcport;
 417        struct scsi_qla_host *vha = fcport->vha;
 418        struct qla_hw_data *ha = vha->hw;
 419        struct req_que *req = vha->req;
 420
 421        spin_lock_irqsave(&ha->hardware_lock, flags);
 422        for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
 423                if (req->outstanding_cmds[handle] == sp)
 424                        break;
 425        }
 426        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 427        if (handle == req->num_outstanding_cmds) {
 428                /* Command not found. */
 429                return QLA_FUNCTION_FAILED;
 430        }
 431        if (sp->type == SRB_FXIOCB_DCMD)
 432                return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
 433                    FXDISC_ABORT_IOCTL);
 434
 435        return qla24xx_async_abort_cmd(sp);
 436}
 437
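/*
 * Async login completion, dispatched on the mailbox status: COMMAND_COMPLETE
 * validates the login via the port database (FCP2 devices are deferred to
 * ADISC), COMMAND_ERROR schedules a relogin or marks the device lost,
 * PORT_ID_USED retries after a logout using the firmware-supplied loop ID,
 * and LOOP_ID_USED retries with a newly allocated loop ID.
 */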
 438void
 439qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 440    uint16_t *data)
 441{
 442        int rval;
 443
 444        switch (data[0]) {
 445        case MBS_COMMAND_COMPLETE:
 446                /*
 447                 * Driver must validate login state - If PRLI not complete,
 448                 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
 449                 * requests.
 450                 */
 451                rval = qla2x00_get_port_database(vha, fcport, 0);
 452                if (rval == QLA_NOT_LOGGED_IN) {
 453                        fcport->flags &= ~FCF_ASYNC_SENT;
 454                        fcport->flags |= FCF_LOGIN_NEEDED;
 455                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 456                        break;
 457                }
 458
 459                if (rval != QLA_SUCCESS) {
 460                        qla2x00_post_async_logout_work(vha, fcport, NULL);
 461                        qla2x00_post_async_login_work(vha, fcport, NULL);
 462                        break;
 463                }
 464                if (fcport->flags & FCF_FCP2_DEVICE) {
 465                        qla2x00_post_async_adisc_work(vha, fcport, data);
 466                        break;
 467                }
 468                qla2x00_update_fcport(vha, fcport);
 469                break;
 470        case MBS_COMMAND_ERROR:
 471                fcport->flags &= ~FCF_ASYNC_SENT;
 472                if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 473                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 474                else
 475                        qla2x00_mark_device_lost(vha, fcport, 1, 0);
 476                break;
 477        case MBS_PORT_ID_USED:
 478                fcport->loop_id = data[1];
 479                qla2x00_post_async_logout_work(vha, fcport, NULL);
 480                qla2x00_post_async_login_work(vha, fcport, NULL);
 481                break;
 482        case MBS_LOOP_ID_USED:
 483                fcport->loop_id++;
 484                rval = qla2x00_find_new_loop_id(vha, fcport);
 485                if (rval != QLA_SUCCESS) {
 486                        fcport->flags &= ~FCF_ASYNC_SENT;
 487                        qla2x00_mark_device_lost(vha, fcport, 1, 0);
 488                        break;
 489                }
 490                qla2x00_post_async_login_work(vha, fcport, NULL);
 491                break;
 492        }
 493        return;
 494}
 495
 496void
 497qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 498    uint16_t *data)
 499{
 500        qla2x00_mark_device_lost(vha, fcport, 1, 0);
 501        return;
 502}
 503
 504void
 505qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 506    uint16_t *data)
 507{
 508        if (data[0] == MBS_COMMAND_COMPLETE) {
 509                qla2x00_update_fcport(vha, fcport);
 510
 511                return;
 512        }
 513
 514        /* Retry login. */
 515        fcport->flags &= ~FCF_ASYNC_SENT;
 516        if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 517                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 518        else
 519                qla2x00_mark_device_lost(vha, fcport, 1, 0);
 520
 521        return;
 522}
 523
 524/****************************************************************************/
 525/*                QLogic ISP2x00 Hardware Support Functions.                */
 526/****************************************************************************/
 527
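/*
 * First-load handshake for the ISP83xx NIC-core firmware, performed under
 * the IDC lock: advertise driver presence, decide reset ownership, check the
 * IDC major version (the reset owner also seeds it and clears lock
 * recovery), publish this function's supported minor version, and finally
 * run the IDC state handler.
 */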
 528static int
 529qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
 530{
 531        int rval = QLA_SUCCESS;
 532        struct qla_hw_data *ha = vha->hw;
 533        uint32_t idc_major_ver, idc_minor_ver;
 534        uint16_t config[4];
 535
 536        qla83xx_idc_lock(vha, 0);
 537
 538        /* SV: TODO: Assign initialization timeout from
 539         * flash-info / other param
 540         */
 541        ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
 542        ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
 543
 544        /* Set our fcoe function presence */
 545        if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
 546                ql_dbg(ql_dbg_p3p, vha, 0xb077,
 547                    "Error while setting DRV-Presence.\n");
 548                rval = QLA_FUNCTION_FAILED;
 549                goto exit;
 550        }
 551
 552        /* Decide the reset ownership */
 553        qla83xx_reset_ownership(vha);
 554
 555        /*
 556         * On first protocol driver load:
 557         * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
 558         * register.
 559         * Others: Check compatibility with current IDC Major version.
 560         */
 561        qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
 562        if (ha->flags.nic_core_reset_owner) {
 563                /* Set IDC Major version */
 564                idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
 565                qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
 566
 567                /* Clearing IDC-Lock-Recovery register */
 568                qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
 569        } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
 570                /*
 571                 * Clear further IDC participation if we are not compatible with
 572                 * the current IDC Major Version.
 573                 */
 574                ql_log(ql_log_warn, vha, 0xb07d,
 575                    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
 576                    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
 577                __qla83xx_clear_drv_presence(vha);
 578                rval = QLA_FUNCTION_FAILED;
 579                goto exit;
 580        }
 581        /* Each function sets its supported Minor version. */
 582        qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
 583        idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
 584        qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
 585
 586        if (ha->flags.nic_core_reset_owner) {
 587                memset(config, 0, sizeof(config));
 588                if (!qla81xx_get_port_config(vha, config))
 589                        qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
 590                            QLA8XXX_DEV_READY);
 591        }
 592
 593        rval = qla83xx_idc_state_handler(vha);
 594
 595exit:
 596        qla83xx_idc_unlock(vha, 0);
 597
 598        return rval;
 599}
 600
 601/*
 602* qla2x00_initialize_adapter
 603*      Initialize board.
 604*
 605* Input:
 606*      ha = adapter block pointer.
 607*
 608* Returns:
 609*      0 = success
 610*/
 611int
 612qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 613{
 614        int     rval;
 615        struct qla_hw_data *ha = vha->hw;
 616        struct req_que *req = ha->req_q_map[0];
 617
 618        /* Clear adapter flags. */
 619        vha->flags.online = 0;
 620        ha->flags.chip_reset_done = 0;
 621        vha->flags.reset_active = 0;
 622        ha->flags.pci_channel_io_perm_failure = 0;
 623        ha->flags.eeh_busy = 0;
 624        vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
 625        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 626        atomic_set(&vha->loop_state, LOOP_DOWN);
 627        vha->device_flags = DFLG_NO_CABLE;
 628        vha->dpc_flags = 0;
 629        vha->flags.management_server_logged_in = 0;
 630        vha->marker_needed = 0;
 631        ha->isp_abort_cnt = 0;
 632        ha->beacon_blink_led = 0;
 633
 634        set_bit(0, ha->req_qid_map);
 635        set_bit(0, ha->rsp_qid_map);
 636
 637        ql_dbg(ql_dbg_init, vha, 0x0040,
 638            "Configuring PCI space...\n");
 639        rval = ha->isp_ops->pci_config(vha);
 640        if (rval) {
 641                ql_log(ql_log_warn, vha, 0x0044,
 642                    "Unable to configure PCI space.\n");
 643                return (rval);
 644        }
 645
 646        ha->isp_ops->reset_chip(vha);
 647
 648        rval = qla2xxx_get_flash_info(vha);
 649        if (rval) {
 650                ql_log(ql_log_fatal, vha, 0x004f,
 651                    "Unable to validate FLASH data.\n");
 652                return rval;
 653        }
 654
 655        if (IS_QLA8044(ha)) {
 656                qla8044_read_reset_template(vha);
 657
  658                /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
  659                 * If DONTRESET_BIT0 is set, drivers should not set dev_state
  660                 * to NEED_RESET. But if NEED_RESET is set, drivers should
  661                 * honor the reset. */
 662                if (ql2xdontresethba == 1)
 663                        qla8044_set_idc_dontreset(vha);
 664        }
 665
 666        ha->isp_ops->get_flash_version(vha, req->ring);
 667        ql_dbg(ql_dbg_init, vha, 0x0061,
 668            "Configure NVRAM parameters...\n");
 669
 670        ha->isp_ops->nvram_config(vha);
 671
 672        if (ha->flags.disable_serdes) {
 673                /* Mask HBA via NVRAM settings? */
 674                ql_log(ql_log_info, vha, 0x0077,
 675                    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
 676                return QLA_FUNCTION_FAILED;
 677        }
 678
 679        ql_dbg(ql_dbg_init, vha, 0x0078,
 680            "Verifying loaded RISC code...\n");
 681
 682        if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
 683                rval = ha->isp_ops->chip_diag(vha);
 684                if (rval)
 685                        return (rval);
 686                rval = qla2x00_setup_chip(vha);
 687                if (rval)
 688                        return (rval);
 689        }
 690
 691        if (IS_QLA84XX(ha)) {
 692                ha->cs84xx = qla84xx_get_chip(vha);
 693                if (!ha->cs84xx) {
 694                        ql_log(ql_log_warn, vha, 0x00d0,
 695                            "Unable to configure ISP84XX.\n");
 696                        return QLA_FUNCTION_FAILED;
 697                }
 698        }
 699
 700        if (qla_ini_mode_enabled(vha))
 701                rval = qla2x00_init_rings(vha);
 702
 703        ha->flags.chip_reset_done = 1;
 704
 705        if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
 706                /* Issue verify 84xx FW IOCB to complete 84xx initialization */
 707                rval = qla84xx_init_chip(vha);
 708                if (rval != QLA_SUCCESS) {
 709                        ql_log(ql_log_warn, vha, 0x00d4,
 710                            "Unable to initialize ISP84XX.\n");
  711                        qla84xx_put_chip(vha);
 712                }
 713        }
 714
 715        /* Load the NIC Core f/w if we are the first protocol driver. */
 716        if (IS_QLA8031(ha)) {
 717                rval = qla83xx_nic_core_fw_load(vha);
 718                if (rval)
 719                        ql_log(ql_log_warn, vha, 0x0124,
 720                            "Error in initializing NIC Core f/w.\n");
 721        }
 722
 723        if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
 724                qla24xx_read_fcp_prio_cfg(vha);
 725
 726        if (IS_P3P_TYPE(ha))
 727                qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
 728        else
 729                qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
 730
 731        return (rval);
 732}
 733
 734/**
 735 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 736 * @ha: HA context
 737 *
 738 * Returns 0 on success.
 739 */
 740int
 741qla2100_pci_config(scsi_qla_host_t *vha)
 742{
 743        uint16_t w;
 744        unsigned long flags;
 745        struct qla_hw_data *ha = vha->hw;
 746        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 747
 748        pci_set_master(ha->pdev);
 749        pci_try_set_mwi(ha->pdev);
 750
 751        pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
 752        w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
 753        pci_write_config_word(ha->pdev, PCI_COMMAND, w);
 754
 755        pci_disable_rom(ha->pdev);
 756
 757        /* Get PCI bus information. */
 758        spin_lock_irqsave(&ha->hardware_lock, flags);
 759        ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
 760        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 761
 762        return QLA_SUCCESS;
 763}
 764
 765/**
 766 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 767 * @ha: HA context
 768 *
 769 * Returns 0 on success.
 770 */
 771int
 772qla2300_pci_config(scsi_qla_host_t *vha)
 773{
 774        uint16_t        w;
 775        unsigned long   flags = 0;
 776        uint32_t        cnt;
 777        struct qla_hw_data *ha = vha->hw;
 778        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 779
 780        pci_set_master(ha->pdev);
 781        pci_try_set_mwi(ha->pdev);
 782
 783        pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
 784        w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
 785
 786        if (IS_QLA2322(ha) || IS_QLA6322(ha))
 787                w &= ~PCI_COMMAND_INTX_DISABLE;
 788        pci_write_config_word(ha->pdev, PCI_COMMAND, w);
 789
 790        /*
 791         * If this is a 2300 card and not 2312, reset the
 792         * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
 793         * the 2310 also reports itself as a 2300 so we need to get the
 794         * fb revision level -- a 6 indicates it really is a 2300 and
 795         * not a 2310.
 796         */
 797        if (IS_QLA2300(ha)) {
 798                spin_lock_irqsave(&ha->hardware_lock, flags);
 799
 800                /* Pause RISC. */
 801                WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
 802                for (cnt = 0; cnt < 30000; cnt++) {
 803                        if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
 804                                break;
 805
 806                        udelay(10);
 807                }
 808
 809                /* Select FPM registers. */
 810                WRT_REG_WORD(&reg->ctrl_status, 0x20);
 811                RD_REG_WORD(&reg->ctrl_status);
 812
 813                /* Get the fb rev level */
 814                ha->fb_rev = RD_FB_CMD_REG(ha, reg);
 815
 816                if (ha->fb_rev == FPM_2300)
 817                        pci_clear_mwi(ha->pdev);
 818
 819                /* Deselect FPM registers. */
 820                WRT_REG_WORD(&reg->ctrl_status, 0x0);
 821                RD_REG_WORD(&reg->ctrl_status);
 822
 823                /* Release RISC module. */
 824                WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
 825                for (cnt = 0; cnt < 30000; cnt++) {
 826                        if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
 827                                break;
 828
 829                        udelay(10);
 830                }
 831
 832                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 833        }
 834
 835        pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
 836
 837        pci_disable_rom(ha->pdev);
 838
 839        /* Get PCI bus information. */
 840        spin_lock_irqsave(&ha->hardware_lock, flags);
 841        ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
 842        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 843
 844        return QLA_SUCCESS;
 845}
 846
 847/**
 848 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 849 * @ha: HA context
 850 *
 851 * Returns 0 on success.
 852 */
 853int
 854qla24xx_pci_config(scsi_qla_host_t *vha)
 855{
 856        uint16_t w;
 857        unsigned long flags = 0;
 858        struct qla_hw_data *ha = vha->hw;
 859        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 860
 861        pci_set_master(ha->pdev);
 862        pci_try_set_mwi(ha->pdev);
 863
 864        pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
 865        w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
 866        w &= ~PCI_COMMAND_INTX_DISABLE;
 867        pci_write_config_word(ha->pdev, PCI_COMMAND, w);
 868
 869        pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
 870
 871        /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
 872        if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
 873                pcix_set_mmrbc(ha->pdev, 2048);
 874
  875        /* PCIe -- adjust Maximum Read Request Size (4096). */
 876        if (pci_is_pcie(ha->pdev))
 877                pcie_set_readrq(ha->pdev, 4096);
 878
 879        pci_disable_rom(ha->pdev);
 880
 881        ha->chip_revision = ha->pdev->revision;
 882
 883        /* Get PCI bus information. */
 884        spin_lock_irqsave(&ha->hardware_lock, flags);
 885        ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
 886        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 887
 888        return QLA_SUCCESS;
 889}
 890
 891/**
 892 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 893 * @ha: HA context
 894 *
 895 * Returns 0 on success.
 896 */
 897int
 898qla25xx_pci_config(scsi_qla_host_t *vha)
 899{
 900        uint16_t w;
 901        struct qla_hw_data *ha = vha->hw;
 902
 903        pci_set_master(ha->pdev);
 904        pci_try_set_mwi(ha->pdev);
 905
 906        pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
 907        w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
 908        w &= ~PCI_COMMAND_INTX_DISABLE;
 909        pci_write_config_word(ha->pdev, PCI_COMMAND, w);
 910
  911        /* PCIe -- adjust Maximum Read Request Size (4096). */
 912        if (pci_is_pcie(ha->pdev))
 913                pcie_set_readrq(ha->pdev, 4096);
 914
 915        pci_disable_rom(ha->pdev);
 916
 917        ha->chip_revision = ha->pdev->revision;
 918
 919        return QLA_SUCCESS;
 920}
 921
 922/**
 923 * qla2x00_isp_firmware() - Choose firmware image.
 924 * @ha: HA context
 925 *
 926 * Returns 0 on success.
 927 */
 928static int
 929qla2x00_isp_firmware(scsi_qla_host_t *vha)
 930{
 931        int  rval;
 932        uint16_t loop_id, topo, sw_cap;
 933        uint8_t domain, area, al_pa;
 934        struct qla_hw_data *ha = vha->hw;
 935
 936        /* Assume loading risc code */
 937        rval = QLA_FUNCTION_FAILED;
 938
 939        if (ha->flags.disable_risc_code_load) {
 940                ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
 941
 942                /* Verify checksum of loaded RISC code. */
 943                rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
 944                if (rval == QLA_SUCCESS) {
 945                        /* And, verify we are not in ROM code. */
 946                        rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
 947                            &area, &domain, &topo, &sw_cap);
 948                }
 949        }
 950
 951        if (rval)
 952                ql_dbg(ql_dbg_init, vha, 0x007a,
 953                    "**** Load RISC code ****.\n");
 954
 955        return (rval);
 956}
 957
 958/**
 959 * qla2x00_reset_chip() - Reset ISP chip.
 960 * @ha: HA context
 961 *
 962 * Returns 0 on success.
 963 */
 964void
 965qla2x00_reset_chip(scsi_qla_host_t *vha)
 966{
 967        unsigned long   flags = 0;
 968        struct qla_hw_data *ha = vha->hw;
 969        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 970        uint32_t        cnt;
 971        uint16_t        cmd;
 972
 973        if (unlikely(pci_channel_offline(ha->pdev)))
 974                return;
 975
 976        ha->isp_ops->disable_intrs(ha);
 977
 978        spin_lock_irqsave(&ha->hardware_lock, flags);
 979
 980        /* Turn off master enable */
 981        cmd = 0;
 982        pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
 983        cmd &= ~PCI_COMMAND_MASTER;
 984        pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
 985
 986        if (!IS_QLA2100(ha)) {
 987                /* Pause RISC. */
 988                WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
 989                if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
 990                        for (cnt = 0; cnt < 30000; cnt++) {
 991                                if ((RD_REG_WORD(&reg->hccr) &
 992                                    HCCR_RISC_PAUSE) != 0)
 993                                        break;
 994                                udelay(100);
 995                        }
 996                } else {
 997                        RD_REG_WORD(&reg->hccr);        /* PCI Posting. */
 998                        udelay(10);
 999                }
1000
1001                /* Select FPM registers. */
1002                WRT_REG_WORD(&reg->ctrl_status, 0x20);
1003                RD_REG_WORD(&reg->ctrl_status);         /* PCI Posting. */
1004
1005                /* FPM Soft Reset. */
1006                WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
1007                RD_REG_WORD(&reg->fpm_diag_config);     /* PCI Posting. */
1008
1009                /* Toggle Fpm Reset. */
1010                if (!IS_QLA2200(ha)) {
1011                        WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
1012                        RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
1013                }
1014
1015                /* Select frame buffer registers. */
1016                WRT_REG_WORD(&reg->ctrl_status, 0x10);
1017                RD_REG_WORD(&reg->ctrl_status);         /* PCI Posting. */
1018
1019                /* Reset frame buffer FIFOs. */
1020                if (IS_QLA2200(ha)) {
1021                        WRT_FB_CMD_REG(ha, reg, 0xa000);
1022                        RD_FB_CMD_REG(ha, reg);         /* PCI Posting. */
1023                } else {
1024                        WRT_FB_CMD_REG(ha, reg, 0x00fc);
1025
1026                        /* Read back fb_cmd until zero or 3 seconds max */
1027                        for (cnt = 0; cnt < 3000; cnt++) {
1028                                if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
1029                                        break;
1030                                udelay(100);
1031                        }
1032                }
1033
1034                /* Select RISC module registers. */
1035                WRT_REG_WORD(&reg->ctrl_status, 0);
1036                RD_REG_WORD(&reg->ctrl_status);         /* PCI Posting. */
1037
1038                /* Reset RISC processor. */
1039                WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1040                RD_REG_WORD(&reg->hccr);                /* PCI Posting. */
1041
1042                /* Release RISC processor. */
1043                WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1044                RD_REG_WORD(&reg->hccr);                /* PCI Posting. */
1045        }
1046
1047        WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1048        WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
1049
1050        /* Reset ISP chip. */
1051        WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1052
1053        /* Wait for RISC to recover from reset. */
1054        if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1055                /*
 1056                 * It is necessary to delay here since the card doesn't
1057                 * respond to PCI reads during a reset. On some architectures
1058                 * this will result in an MCA.
1059                 */
1060                udelay(20);
1061                for (cnt = 30000; cnt; cnt--) {
1062                        if ((RD_REG_WORD(&reg->ctrl_status) &
1063                            CSR_ISP_SOFT_RESET) == 0)
1064                                break;
1065                        udelay(100);
1066                }
1067        } else
1068                udelay(10);
1069
1070        /* Reset RISC processor. */
1071        WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1072
1073        WRT_REG_WORD(&reg->semaphore, 0);
1074
1075        /* Release RISC processor. */
1076        WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1077        RD_REG_WORD(&reg->hccr);                        /* PCI Posting. */
1078
1079        if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1080                for (cnt = 0; cnt < 30000; cnt++) {
1081                        if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
1082                                break;
1083
1084                        udelay(100);
1085                }
1086        } else
1087                udelay(100);
1088
1089        /* Turn on master enable */
1090        cmd |= PCI_COMMAND_MASTER;
1091        pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
1092
1093        /* Disable RISC pause on FPM parity error. */
1094        if (!IS_QLA2100(ha)) {
1095                WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
1096                RD_REG_WORD(&reg->hccr);                /* PCI Posting. */
1097        }
1098
1099        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1100}
1101
1102/**
 1103 * qla81xx_reset_mpi() - Resets MPI FW via Write MPI Register MBC.
1104 *
1105 * Returns 0 on success.
1106 */
1107static int
1108qla81xx_reset_mpi(scsi_qla_host_t *vha)
1109{
1110        uint16_t mb[4] = {0x1010, 0, 1, 0};
1111
1112        if (!IS_QLA81XX(vha->hw))
1113                return QLA_SUCCESS;
1114
1115        return qla81xx_write_mpi_register(vha, mb);
1116}
1117
1118/**
1119 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
1120 * @ha: HA context
1121 *
1122 * Returns 0 on success.
1123 */
1124static inline void
1125qla24xx_reset_risc(scsi_qla_host_t *vha)
1126{
1127        unsigned long flags = 0;
1128        struct qla_hw_data *ha = vha->hw;
1129        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1130        uint32_t cnt, d2;
1131        uint16_t wd;
1132        static int abts_cnt; /* ISP abort retry counts */
1133
1134        spin_lock_irqsave(&ha->hardware_lock, flags);
1135
1136        /* Reset RISC. */
1137        WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
1138        for (cnt = 0; cnt < 30000; cnt++) {
1139                if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
1140                        break;
1141
1142                udelay(10);
1143        }
1144
1145        WRT_REG_DWORD(&reg->ctrl_status,
1146            CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
1147        pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1148
1149        udelay(100);
1150        /* Wait for firmware to complete NVRAM accesses. */
1151        d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1152        for (cnt = 10000 ; cnt && d2; cnt--) {
1153                udelay(5);
1154                d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1155                barrier();
1156        }
1157
1158        /* Wait for soft-reset to complete. */
1159        d2 = RD_REG_DWORD(&reg->ctrl_status);
1160        for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
1161                udelay(5);
1162                d2 = RD_REG_DWORD(&reg->ctrl_status);
1163                barrier();
1164        }
1165
1166        /* If required, do an MPI FW reset now */
1167        if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
1168                if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
1169                        if (++abts_cnt < 5) {
1170                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1171                                set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
1172                        } else {
1173                                /*
1174                                 * We exhausted the ISP abort retries. We have to
1175                                 * set the board offline.
1176                                 */
1177                                abts_cnt = 0;
1178                                vha->flags.online = 0;
1179                        }
1180                }
1181        }
1182
1183        WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
1184        RD_REG_DWORD(&reg->hccr);
1185
1186        WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
1187        RD_REG_DWORD(&reg->hccr);
1188
1189        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
1190        RD_REG_DWORD(&reg->hccr);
1191
1192        d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1193        for (cnt = 6000000 ; cnt && d2; cnt--) {
1194                udelay(5);
1195                d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1196                barrier();
1197        }
1198
1199        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1200
1201        if (IS_NOPOLLING_TYPE(ha))
1202                ha->isp_ops->enable_intrs(ha);
1203}
1204
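/*
 * The RISC semaphore is reached through the indirect register window:
 * select the RISC register base via iobase_addr, then access the window at
 * the semaphore offset.
 */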
1205static void
1206qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
1207{
1208        struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
1209
1210        WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
1211        *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
1212
1213}
1214
1215static void
1216qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
1217{
1218        struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
1219
1220        WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
1221        WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
1222}
1223
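/*
 * Acquire the RISC semaphore on ISP25xx/2031 before resetting the RISC:
 * poll the set/force bits within the configured timeouts and, if another
 * function never releases the semaphore, force-set it so the reset can
 * proceed.
 */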
1224static void
1225qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
1226{
1227        struct qla_hw_data *ha = vha->hw;
1228        uint32_t wd32 = 0;
1229        uint delta_msec = 100;
1230        uint elapsed_msec = 0;
1231        uint timeout_msec;
1232        ulong n;
1233
1234        if (!IS_QLA25XX(ha) && !IS_QLA2031(ha))
1235                return;
1236
1237attempt:
1238        timeout_msec = TIMEOUT_SEMAPHORE;
1239        n = timeout_msec / delta_msec;
1240        while (n--) {
1241                qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
1242                qla25xx_read_risc_sema_reg(vha, &wd32);
1243                if (wd32 & RISC_SEMAPHORE)
1244                        break;
1245                msleep(delta_msec);
1246                elapsed_msec += delta_msec;
1247                if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
1248                        goto force;
1249        }
1250
1251        if (!(wd32 & RISC_SEMAPHORE))
1252                goto force;
1253
1254        if (!(wd32 & RISC_SEMAPHORE_FORCE))
1255                goto acquired;
1256
1257        qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
1258        timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
1259        n = timeout_msec / delta_msec;
1260        while (n--) {
1261                qla25xx_read_risc_sema_reg(vha, &wd32);
1262                if (!(wd32 & RISC_SEMAPHORE_FORCE))
1263                        break;
1264                msleep(delta_msec);
1265                elapsed_msec += delta_msec;
1266                if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
1267                        goto force;
1268        }
1269
1270        if (wd32 & RISC_SEMAPHORE_FORCE)
1271                qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
1272
1273        goto attempt;
1274
1275force:
1276        qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
1277
1278acquired:
1279        return;
1280}
1281
1282/**
1283 * qla24xx_reset_chip() - Reset ISP24xx chip.
1284 * @ha: HA context
1285 *
1286 * Returns 0 on success.
1287 */
1288void
1289qla24xx_reset_chip(scsi_qla_host_t *vha)
1290{
1291        struct qla_hw_data *ha = vha->hw;
1292
1293        if (pci_channel_offline(ha->pdev) &&
1294            ha->flags.pci_channel_io_perm_failure) {
1295                return;
1296        }
1297
1298        ha->isp_ops->disable_intrs(ha);
1299
1300        qla25xx_manipulate_risc_semaphore(vha);
1301
1302        /* Perform RISC reset. */
1303        qla24xx_reset_risc(vha);
1304}
1305
1306/**
1307 * qla2x00_chip_diag() - Test chip for proper operation.
1308 * @ha: HA context
1309 *
1310 * Returns 0 on success.
1311 */
1312int
1313qla2x00_chip_diag(scsi_qla_host_t *vha)
1314{
1315        int             rval;
1316        struct qla_hw_data *ha = vha->hw;
1317        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1318        unsigned long   flags = 0;
1319        uint16_t        data;
1320        uint32_t        cnt;
1321        uint16_t        mb[5];
1322        struct req_que *req = ha->req_q_map[0];
1323
1324        /* Assume a failed state */
1325        rval = QLA_FUNCTION_FAILED;
1326
1327        ql_dbg(ql_dbg_init, vha, 0x007b,
1328            "Testing device at %lx.\n", (u_long)&reg->flash_address);
1329
1330        spin_lock_irqsave(&ha->hardware_lock, flags);
1331
1332        /* Reset ISP chip. */
1333        WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1334
1335        /*
1336         * We need to have a delay here since the card will not respond while
 1337         * in reset, causing an MCA on some architectures.
1338         */
1339        udelay(20);
1340        data = qla2x00_debounce_register(&reg->ctrl_status);
1341        for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
1342                udelay(5);
1343                data = RD_REG_WORD(&reg->ctrl_status);
1344                barrier();
1345        }
1346
1347        if (!cnt)
1348                goto chip_diag_failed;
1349
1350        ql_dbg(ql_dbg_init, vha, 0x007c,
1351            "Reset register cleared by chip reset.\n");
1352
1353        /* Reset RISC processor. */
1354        WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1355        WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1356
1357        /* Workaround for QLA2312 PCI parity error */
1358        if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1359                data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
1360                for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
1361                        udelay(5);
1362                        data = RD_MAILBOX_REG(ha, reg, 0);
1363                        barrier();
1364                }
1365        } else
1366                udelay(10);
1367
1368        if (!cnt)
1369                goto chip_diag_failed;
1370
1371        /* Check product ID of chip */
1372        ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
1373
1374        mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1375        mb[2] = RD_MAILBOX_REG(ha, reg, 2);
1376        mb[3] = RD_MAILBOX_REG(ha, reg, 3);
1377        mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1378        if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1379            mb[3] != PROD_ID_3) {
1380                ql_log(ql_log_warn, vha, 0x0062,
1381                    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
1382                    mb[1], mb[2], mb[3]);
1383
1384                goto chip_diag_failed;
1385        }
1386        ha->product_id[0] = mb[1];
1387        ha->product_id[1] = mb[2];
1388        ha->product_id[2] = mb[3];
1389        ha->product_id[3] = mb[4];
1390
1391        /* Adjust fw RISC transfer size */
1392        if (req->length > 1024)
1393                ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
1394        else
1395                ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
1396                    req->length;
1397
1398        if (IS_QLA2200(ha) &&
1399            RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1400                /* Limit firmware transfer size with a 2200A */
1401                ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1402
1403                ha->device_type |= DT_ISP2200A;
1404                ha->fw_transfer_size = 128;
1405        }
1406
1407        /* Wrap Incoming Mailboxes Test. */
1408        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1409
1410        ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
1411        rval = qla2x00_mbx_reg_test(vha);
1412        if (rval)
1413                ql_log(ql_log_warn, vha, 0x0080,
1414                    "Failed mailbox send register test.\n");
1415        else
1416                /* Flag a successful rval */
1417                rval = QLA_SUCCESS;
1418        spin_lock_irqsave(&ha->hardware_lock, flags);
1419
1420chip_diag_failed:
1421        if (rval)
1422                ql_log(ql_log_info, vha, 0x0081,
1423                    "Chip diagnostics **** FAILED ****.\n");
1424
1425        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1426
1427        return (rval);
1428}
1429
1430/**
1431 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
1432 * @ha: HA context
1433 *
1434 * Returns 0 on success.
1435 */
1436int
1437qla24xx_chip_diag(scsi_qla_host_t *vha)
1438{
1439        int rval;
1440        struct qla_hw_data *ha = vha->hw;
1441        struct req_que *req = ha->req_q_map[0];
1442
1443        if (IS_P3P_TYPE(ha))
1444                return QLA_SUCCESS;
1445
1446        ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
1447
1448        rval = qla2x00_mbx_reg_test(vha);
1449        if (rval) {
1450                ql_log(ql_log_warn, vha, 0x0082,
1451                    "Failed mailbox send register test.\n");
1452        } else {
1453                /* Flag a successful rval */
1454                rval = QLA_SUCCESS;
1455        }
1456
1457        return rval;
1458}
1459
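/*
 * Size and allocate the firmware-dump buffer.  The fixed register area,
 * external memory, request/response queues and optional EFT/FCE trace
 * buffers each contribute to the total; the FCE and EFT DMA buffers are
 * (re)allocated and enabled here as well.  ISP27xx instead sizes the dump
 * from its firmware-dump template.
 */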
1460void
1461qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1462{
1463        int rval;
1464        uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
1465            eft_size, fce_size, mq_size;
1466        dma_addr_t tc_dma;
1467        void *tc;
1468        struct qla_hw_data *ha = vha->hw;
1469        struct req_que *req = ha->req_q_map[0];
1470        struct rsp_que *rsp = ha->rsp_q_map[0];
1471
1472        if (ha->fw_dump) {
1473                ql_dbg(ql_dbg_init, vha, 0x00bd,
1474                    "Firmware dump already allocated.\n");
1475                return;
1476        }
1477
1478        ha->fw_dumped = 0;
1479        dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
1480        req_q_size = rsp_q_size = 0;
1481
1482        if (IS_QLA27XX(ha))
1483                goto try_fce;
1484
1485        if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1486                fixed_size = sizeof(struct qla2100_fw_dump);
1487        } else if (IS_QLA23XX(ha)) {
1488                fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
1489                mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1490                    sizeof(uint16_t);
1491        } else if (IS_FWI2_CAPABLE(ha)) {
1492                if (IS_QLA83XX(ha))
1493                        fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
1494                else if (IS_QLA81XX(ha))
1495                        fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
1496                else if (IS_QLA25XX(ha))
1497                        fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
1498                else
1499                        fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
1500
1501                mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1502                    sizeof(uint32_t);
1503                if (ha->mqenable) {
1504                        if (!IS_QLA83XX(ha))
1505                                mq_size = sizeof(struct qla2xxx_mq_chain);
1506                        /*
1507                         * Allocate maximum buffer size for all queues.
1508                         * Resizing must be done at end-of-dump processing.
1509                         */
1510                        mq_size += ha->max_req_queues *
1511                            (req->length * sizeof(request_t));
1512                        mq_size += ha->max_rsp_queues *
1513                            (rsp->length * sizeof(response_t));
1514                }
1515                if (ha->tgt.atio_ring)
1516                        mq_size += ha->tgt.atio_q_length * sizeof(request_t);
1517                /* Allocate memory for Fibre Channel Event Buffer. */
1518                if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1519                    !IS_QLA27XX(ha))
1520                        goto try_eft;
1521
1522try_fce:
1523                if (ha->fce)
1524                        dma_free_coherent(&ha->pdev->dev,
1525                            FCE_SIZE, ha->fce, ha->fce_dma);
1526
1527                /* Allocate memory for Fibre Channel Event Buffer. */
1528                tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1529                    GFP_KERNEL);
1530                if (!tc) {
1531                        ql_log(ql_log_warn, vha, 0x00be,
1532                            "Unable to allocate (%d KB) for FCE.\n",
1533                            FCE_SIZE / 1024);
1534                        goto try_eft;
1535                }
1536
1537                memset(tc, 0, FCE_SIZE);
1538                rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
1539                    ha->fce_mb, &ha->fce_bufs);
1540                if (rval) {
1541                        ql_log(ql_log_warn, vha, 0x00bf,
1542                            "Unable to initialize FCE (%d).\n", rval);
1543                        dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1544                            tc_dma);
1545                        ha->flags.fce_enabled = 0;
1546                        goto try_eft;
1547                }
1548                ql_dbg(ql_dbg_init, vha, 0x00c0,
1549                    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
1550
1551                fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
1552                ha->flags.fce_enabled = 1;
1553                ha->fce_dma = tc_dma;
1554                ha->fce = tc;
1555
1556try_eft:
1557                if (ha->eft)
1558                        dma_free_coherent(&ha->pdev->dev,
1559                            EFT_SIZE, ha->eft, ha->eft_dma);
1560
1561                /* Allocate memory for Extended Trace Buffer. */
1562                tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1563                    GFP_KERNEL);
1564                if (!tc) {
1565                        ql_log(ql_log_warn, vha, 0x00c1,
1566                            "Unable to allocate (%d KB) for EFT.\n",
1567                            EFT_SIZE / 1024);
1568                        goto cont_alloc;
1569                }
1570
1571                memset(tc, 0, EFT_SIZE);
1572                rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
1573                if (rval) {
1574                        ql_log(ql_log_warn, vha, 0x00c2,
1575                            "Unable to initialize EFT (%d).\n", rval);
1576                        dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1577                            tc_dma);
1578                        goto cont_alloc;
1579                }
1580                ql_dbg(ql_dbg_init, vha, 0x00c3,
1581                    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
1582
1583                eft_size = EFT_SIZE;
1584                ha->eft_dma = tc_dma;
1585                ha->eft = tc;
1586        }
1587
1588cont_alloc:
1589        if (IS_QLA27XX(ha)) {
1590                if (!ha->fw_dump_template) {
1591                        ql_log(ql_log_warn, vha, 0x00ba,
1592                            "Failed: missing fwdump template.\n");
1593                        return;
1594                }
1595                dump_size = qla27xx_fwdt_calculate_dump_size(vha);
1596                ql_dbg(ql_dbg_init, vha, 0x00fa,
1597                    "-> allocating fwdump (%x bytes)...\n", dump_size);
1598                goto allocate;
1599        }
1600
1601        req_q_size = req->length * sizeof(request_t);
1602        rsp_q_size = rsp->length * sizeof(response_t);
1603        dump_size = offsetof(struct qla2xxx_fw_dump, isp);
1604        dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
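        /*
         * chain_offset marks the end of the fixed portion of the dump; the
         * multiqueue and FCE chains, when present, are appended after it.
         */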
1605        ha->chain_offset = dump_size;
1606        dump_size += mq_size + fce_size;
1607
1608allocate:
1609        ha->fw_dump = vmalloc(dump_size);
1610        if (!ha->fw_dump) {
1611                ql_log(ql_log_warn, vha, 0x00c4,
1612                    "Unable to allocate (%d KB) for firmware dump.\n",
1613                    dump_size / 1024);
1614
1615                if (ha->fce) {
1616                        dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
1617                            ha->fce_dma);
1618                        ha->fce = NULL;
1619                        ha->fce_dma = 0;
1620                }
1621
1622                if (ha->eft) {
1623                        dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
1624                            ha->eft_dma);
1625                        ha->eft = NULL;
1626                        ha->eft_dma = 0;
1627                }
1628                return;
1629        }
1630        ha->fw_dump_len = dump_size;
1631        ql_dbg(ql_dbg_init, vha, 0x00c5,
1632            "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
1633
1634        if (IS_QLA27XX(ha))
1635                return;
1636
1637        ha->fw_dump->signature[0] = 'Q';
1638        ha->fw_dump->signature[1] = 'L';
1639        ha->fw_dump->signature[2] = 'G';
1640        ha->fw_dump->signature[3] = 'C';
1641        ha->fw_dump->version = __constant_htonl(1);
1642
1643        ha->fw_dump->fixed_size = htonl(fixed_size);
1644        ha->fw_dump->mem_size = htonl(mem_size);
1645        ha->fw_dump->req_q_size = htonl(req_q_size);
1646        ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
1647
1648        ha->fw_dump->eft_size = htonl(eft_size);
1649        ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
1650        ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
1651
1652        ha->fw_dump->header_size =
1653            htonl(offsetof(struct qla2xxx_fw_dump, isp));
1654}
1655
1656static int
1657qla81xx_mpi_sync(scsi_qla_host_t *vha)
1658{
1659#define MPS_MASK        0xe0
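        /*
         * Mirror the MPS field (bits 7:5) from PCI config offset 0x54 into
         * MPI RAM word 0x7a15, taking semaphore word 0x7c00 around the
         * read-modify-write and rewriting the word only when the two differ.
         */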
1660        int rval;
1661        uint16_t dc;
1662        uint32_t dw;
1663
1664        if (!IS_QLA81XX(vha->hw))
1665                return QLA_SUCCESS;
1666
1667        rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1668        if (rval != QLA_SUCCESS) {
1669                ql_log(ql_log_warn, vha, 0x0105,
1670                    "Unable to acquire semaphore.\n");
1671                goto done;
1672        }
1673
1674        pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1675        rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1676        if (rval != QLA_SUCCESS) {
1677                ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
1678                goto done_release;
1679        }
1680
1681        dc &= MPS_MASK;
1682        if (dc == (dw & MPS_MASK))
1683                goto done_release;
1684
1685        dw &= ~MPS_MASK;
1686        dw |= dc;
1687        rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1688        if (rval != QLA_SUCCESS) {
1689                ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
1690        }
1691
1692done_release:
1693        rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1694        if (rval != QLA_SUCCESS) {
1695                ql_log(ql_log_warn, vha, 0x006d,
1696                    "Unable to release semaphore.\n");
1697        }
1698
1699done:
1700        return rval;
1701}
1702
1703int
1704qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
1705{
1706        /* Don't try to reallocate the array */
1707        if (req->outstanding_cmds)
1708                return QLA_SUCCESS;
1709
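        /*
         * Pre-FWI2 adapters and multiqueue configurations use the driver
         * default; otherwise size the array to the smaller of the firmware's
         * reported XCB and IOCB counts.
         */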
1710        if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
1711            (ql2xmultique_tag || ql2xmaxqueues > 1)))
1712                req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
1713        else {
1714                if (ha->fw_xcb_count <= ha->fw_iocb_count)
1715                        req->num_outstanding_cmds = ha->fw_xcb_count;
1716                else
1717                        req->num_outstanding_cmds = ha->fw_iocb_count;
1718        }
1719
1720        req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
1721            req->num_outstanding_cmds, GFP_KERNEL);
1722
1723        if (!req->outstanding_cmds) {
1724                /*
1725                 * Try to allocate a minimal size just so we can get through
1726                 * initialization.
1727                 */
1728                req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
1729                req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
1730                    req->num_outstanding_cmds, GFP_KERNEL);
1731
1732                if (!req->outstanding_cmds) {
1733                        ql_log(ql_log_fatal, NULL, 0x0126,
1734                            "Failed to allocate memory for "
1735                            "outstanding_cmds for req_que %p.\n", req);
1736                        req->num_outstanding_cmds = 0;
1737                        return QLA_FUNCTION_FAILED;
1738                }
1739        }
1740
1741        return QLA_SUCCESS;
1742}
1743
1744/**
1745 * qla2x00_setup_chip() - Load and start RISC firmware.
1746 * @vha: HA context
1747 *
1748 * Returns 0 on success.
1749 */
1750static int
1751qla2x00_setup_chip(scsi_qla_host_t *vha)
1752{
1753        int rval;
1754        uint32_t srisc_address = 0;
1755        struct qla_hw_data *ha = vha->hw;
1756        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1757        unsigned long flags;
1758        uint16_t fw_major_version;
1759
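        /*
         * P3P (ISP82xx-style) parts load firmware through their own path;
         * on success, skip the parity handling below and go straight to the
         * common post-load processing.
         */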
1760        if (IS_P3P_TYPE(ha)) {
1761                rval = ha->isp_ops->load_risc(vha, &srisc_address);
1762                if (rval == QLA_SUCCESS) {
1763                        qla2x00_stop_firmware(vha);
1764                        goto enable_82xx_npiv;
1765                } else
1766                        goto failed;
1767        }
1768
1769        if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1770                /* Disable SRAM, Instruction RAM and GP RAM parity.  */
1771                spin_lock_irqsave(&ha->hardware_lock, flags);
1772                WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
1773                RD_REG_WORD(&reg->hccr);
1774                spin_unlock_irqrestore(&ha->hardware_lock, flags);
1775        }
1776
1777        qla81xx_mpi_sync(vha);
1778
1779        /* Load firmware sequences */
1780        rval = ha->isp_ops->load_risc(vha, &srisc_address);
1781        if (rval == QLA_SUCCESS) {
1782                ql_dbg(ql_dbg_init, vha, 0x00c9,
1783                    "Verifying Checksum of loaded RISC code.\n");
1784
1785                rval = qla2x00_verify_checksum(vha, srisc_address);
1786                if (rval == QLA_SUCCESS) {
1787                        /* Start firmware execution. */
1788                        ql_dbg(ql_dbg_init, vha, 0x00ca,
1789                            "Starting firmware.\n");
1790
1791                        rval = qla2x00_execute_fw(vha, srisc_address);
1792                        /* Retrieve firmware information. */
1793                        if (rval == QLA_SUCCESS) {
1794enable_82xx_npiv:
1795                                fw_major_version = ha->fw_major_version;
1796                                if (IS_P3P_TYPE(ha))
1797                                        qla82xx_check_md_needed(vha);
1798                                else
1799                                        rval = qla2x00_get_fw_version(vha);
1800                                if (rval != QLA_SUCCESS)
1801                                        goto failed;
1802                                ha->flags.npiv_supported = 0;
1803                                if (IS_QLA2XXX_MIDTYPE(ha) &&
1804                                         (ha->fw_attributes & BIT_2)) {
1805                                        ha->flags.npiv_supported = 1;
1806                                        if ((!ha->max_npiv_vports) ||
1807                                            ((ha->max_npiv_vports + 1) %
1808                                            MIN_MULTI_ID_FABRIC))
1809                                                ha->max_npiv_vports =
1810                                                    MIN_MULTI_ID_FABRIC - 1;
1811                                }
1812                                qla2x00_get_resource_cnts(vha, NULL,
1813                                    &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
1814                                    &ha->max_npiv_vports, NULL);
1815
1816                                /*
1817                                 * Allocate the array of outstanding commands
1818                                 * now that we know the firmware resources.
1819                                 */
1820                                rval = qla2x00_alloc_outstanding_cmds(ha,
1821                                    vha->req);
1822                                if (rval != QLA_SUCCESS)
1823                                        goto failed;
1824
1825                                if (!fw_major_version && ql2xallocfwdump
1826                                    && !(IS_P3P_TYPE(ha)))
1827                                        qla2x00_alloc_fw_dump(vha);
1828                        } else {
1829                                goto failed;
1830                        }
1831                } else {
1832                        ql_log(ql_log_fatal, vha, 0x00cd,
1833                            "ISP Firmware failed checksum.\n");
1834                        goto failed;
1835                }
1836        } else
1837                goto failed;
1838
1839        if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1840                /* Enable proper parity. */
1841                spin_lock_irqsave(&ha->hardware_lock, flags);
1842                if (IS_QLA2300(ha))
1843                        /* SRAM parity */
1844                        WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
1845                else
1846                        /* SRAM, Instruction RAM and GP RAM parity */
1847                        WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
1848                RD_REG_WORD(&reg->hccr);
1849                spin_unlock_irqrestore(&ha->hardware_lock, flags);
1850        }
1851
1852        if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1853                uint32_t size;
1854
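                /*
                 * Query the flash sector size when FAC is required; the
                 * value appears to be reported in 32-bit words, hence the
                 * shift to bytes below.
                 */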
1855                rval = qla81xx_fac_get_sector_size(vha, &size);
1856                if (rval == QLA_SUCCESS) {
1857                        ha->flags.fac_supported = 1;
1858                        ha->fdt_block_size = size << 2;
1859                } else {
1860                        ql_log(ql_log_warn, vha, 0x00ce,
1861                            "Unsupported FAC firmware (%d.%02d.%02d).\n",
1862                            ha->fw_major_version, ha->fw_minor_version,
1863                            ha->fw_subminor_version);
1864
1865                        if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
1866                                ha->flags.fac_supported = 0;
1867                                rval = QLA_SUCCESS;
1868                        }
1869                }
1870        }
1871failed:
1872        if (rval) {
1873                ql_log(ql_log_fatal, vha, 0x00cf,
1874                    "Setup chip ****FAILED****.\n");
1875        }
1876
1877        return (rval);
1878}
1879
1880/**
1881 * qla2x00_init_response_q_entries() - Initializes response queue entries.
1882 * @rsp: response queue
1883 *
1884 * Marks all entries in the response ring as processed.
1888 */
1889void
1890qla2x00_init_response_q_entries(struct rsp_que *rsp)
1891{
1892        uint16_t cnt;
1893        response_t *pkt;
1894
1895        rsp->ring_ptr = rsp->ring;
1896        rsp->ring_index    = 0;
1897        rsp->status_srb = NULL;
1898        pkt = rsp->ring_ptr;
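        /* Stamp each entry so stale ring contents read as processed. */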
1899        for (cnt = 0; cnt < rsp->length; cnt++) {
1900                pkt->signature = RESPONSE_PROCESSED;
1901                pkt++;
1902        }
1903}
1904
1905/**
1906 * qla2x00_update_fw_options() - Read and process firmware options.
1907 * @vha: HA context
1910 */
1911void
1912qla2x00_update_fw_options(scsi_qla_host_t *vha)
1913{
1914        uint16_t swing, emphasis, tx_sens, rx_sens;
1915        struct qla_hw_data *ha = vha->hw;
1916
1917        memset(ha->fw_options, 0, sizeof(ha->fw_options));
1918        qla2x00_get_fw_options(vha, ha->fw_options);
1919
1920        if (IS_QLA2100(ha) || IS_QLA2200(ha))
1921                return;
1922
1923        /* Serial Link options. */
1924        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
1925            "Serial link options.\n");
1926        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
1927            (uint8_t *)&ha->fw_seriallink_options,
1928            sizeof(ha->fw_seriallink_options));
1929
1930        ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1931        if (ha->fw_seriallink_options[3] & BIT_2) {
1932                ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
1933
1934                /*  1G settings */
1935                swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
1936                emphasis = (ha->fw_seriallink_options[2] &
1937                    (BIT_4 | BIT_3)) >> 3;
1938                tx_sens = ha->fw_seriallink_options[0] &
1939                    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1940                rx_sens = (ha->fw_seriallink_options[0] &
1941                    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1942                ha->fw_options[10] = (emphasis << 14) | (swing << 8);
1943                if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1944                        if (rx_sens == 0x0)
1945                                rx_sens = 0x3;
1946                        ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
1947                } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1948                        ha->fw_options[10] |= BIT_5 |
1949                            ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1950                            (tx_sens & (BIT_1 | BIT_0));
1951
1952                /*  2G settings */
1953                swing = (ha->fw_seriallink_options[2] &
1954                    (BIT_7 | BIT_6 | BIT_5)) >> 5;
1955                emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
1956                tx_sens = ha->fw_seriallink_options[1] &
1957                    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1958                rx_sens = (ha->fw_seriallink_options[1] &
1959                    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1960                ha->fw_options[11] = (emphasis << 14) | (swing << 8);
1961                if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1962                        if (rx_sens == 0x0)
1963                                rx_sens = 0x3;
1964                        ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
1965                } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1966                        ha->fw_options[11] |= BIT_5 |
1967                            ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1968                            (tx_sens & (BIT_1 | BIT_0));
1969        }
1970
1971        /* FCP2 options. */
1972        /*  Return command IOCBs without waiting for an ABTS to complete. */
1973        ha->fw_options[3] |= BIT_13;
1974
1975        /* LED scheme. */
1976        if (ha->flags.enable_led_scheme)
1977                ha->fw_options[2] |= BIT_12;
1978
1979        /* Detect ISP6312. */
1980        if (IS_QLA6312(ha))
1981                ha->fw_options[2] |= BIT_13;
1982
1983        /* Update firmware options. */
1984        qla2x00_set_fw_options(vha, ha->fw_options);
1985}
1986
1987void
1988qla24xx_update_fw_options(scsi_qla_host_t *vha)
1989{
1990        int rval;
1991        struct qla_hw_data *ha = vha->hw;
1992
1993        if (IS_P3P_TYPE(ha))
1994                return;
1995
1996        /* Update Serial Link options. */
1997        if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1998                return;
1999
2000        rval = qla2x00_set_serdes_params(vha,
2001            le16_to_cpu(ha->fw_seriallink_options24[1]),
2002            le16_to_cpu(ha->fw_seriallink_options24[2]),
2003            le16_to_cpu(ha->fw_seriallink_options24[3]));
2004        if (rval != QLA_SUCCESS) {
2005                ql_log(ql_log_warn, vha, 0x0104,
2006                    "Unable to update Serial Link options (%x).\n", rval);
2007        }
2008}
2009
2010void
2011qla2x00_config_rings(struct scsi_qla_host *vha)
2012{
2013        struct qla_hw_data *ha = vha->hw;
2014        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2015        struct req_que *req = ha->req_q_map[0];
2016        struct rsp_que *rsp = ha->rsp_q_map[0];
2017
2018        /* Setup ring parameters in initialization control block. */
2019        ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
2020        ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
2021        ha->init_cb->request_q_length = cpu_to_le16(req->length);
2022        ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
2023        ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
2024        ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
2025        ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
2026        ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
2027
2028        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
2029        WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
2030        WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
2031        WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
2032        RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));            /* PCI Posting. */
2033}
2034
2035void
2036qla24xx_config_rings(struct scsi_qla_host *vha)
2037{
2038        struct qla_hw_data *ha = vha->hw;
2039        device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
2040        struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
2041        struct qla_msix_entry *msix;
2042        struct init_cb_24xx *icb;
2043        uint16_t rid = 0;
2044        struct req_que *req = ha->req_q_map[0];
2045        struct rsp_que *rsp = ha->rsp_q_map[0];
2046
2047        /* Setup ring parameters in initialization control block. */
2048        icb = (struct init_cb_24xx *)ha->init_cb;
2049        icb->request_q_outpointer = __constant_cpu_to_le16(0);
2050        icb->response_q_inpointer = __constant_cpu_to_le16(0);
2051        icb->request_q_length = cpu_to_le16(req->length);
2052        icb->response_q_length = cpu_to_le16(rsp->length);
2053        icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
2054        icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
2055        icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
2056        icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
2057
2058        /* Setup ATIO queue dma pointers for target mode */
2059        icb->atio_q_inpointer = __constant_cpu_to_le16(0);
2060        icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
2061        icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
2062        icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
2063
2064        if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2065                icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
2066                icb->rid = __constant_cpu_to_le16(rid);
2067                if (ha->flags.msix_enabled) {
2068                        msix = &ha->msix_entries[1];
2069                        ql_dbg(ql_dbg_init, vha, 0x00fd,
2070                            "Registering vector 0x%x for base que.\n",
2071                            msix->entry);
2072                        icb->msix = cpu_to_le16(msix->entry);
2073                }
2074                /* Use alternate PCI bus number */
2075                if (MSB(rid))
2076                        icb->firmware_options_2 |=
2077                                __constant_cpu_to_le32(BIT_19);
2078                /* Use alternate PCI devfn */
2079                if (LSB(rid))
2080                        icb->firmware_options_2 |=
2081                                __constant_cpu_to_le32(BIT_18);
2082
2083                /* Use Disable MSIX Handshake mode for capable adapters */
2084                if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
2085                    (ha->flags.msix_enabled)) {
2086                        icb->firmware_options_2 &=
2087                                __constant_cpu_to_le32(~BIT_22);
2088                        ha->flags.disable_msix_handshake = 1;
2089                        ql_dbg(ql_dbg_init, vha, 0x00fe,
2090                            "MSIX Handshake Disable Mode turned on.\n");
2091                } else {
2092                        icb->firmware_options_2 |=
2093                                __constant_cpu_to_le32(BIT_22);
2094                }
2095                icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
2096
2097                WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
2098                WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
2099                WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
2100                WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
2101        } else {
2102                WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
2103                WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
2104                WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
2105                WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
2106        }
2107        qlt_24xx_config_rings(vha);
2108
2109        /* PCI posting */
2110        RD_REG_DWORD(&ioreg->hccr);
2111}
2112
2113/**
2114 * qla2x00_init_rings() - Initializes firmware.
2115 * @vha: HA context
2116 *
2117 * Beginning of request ring has initialization control block already built
2118 * by nvram config routine.
2119 *
2120 * Returns 0 on success.
2121 */
2122int
2123qla2x00_init_rings(scsi_qla_host_t *vha)
2124{
2125        int     rval;
2126        unsigned long flags = 0;
2127        int cnt, que;
2128        struct qla_hw_data *ha = vha->hw;
2129        struct req_que *req;
2130        struct rsp_que *rsp;
2131        struct mid_init_cb_24xx *mid_init_cb =
2132            (struct mid_init_cb_24xx *) ha->init_cb;
2133
2134        spin_lock_irqsave(&ha->hardware_lock, flags);
2135
2136        /* Clear outstanding commands array. */
2137        for (que = 0; que < ha->max_req_queues; que++) {
2138                req = ha->req_q_map[que];
2139                if (!req)
2140                        continue;
2141                for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
2142                        req->outstanding_cmds[cnt] = NULL;
2143
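                /* Handle 0 is left unused; command handles start at 1. */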
2144                req->current_outstanding_cmd = 1;
2145
2146                /* Initialize firmware. */
2147                req->ring_ptr  = req->ring;
2148                req->ring_index    = 0;
2149                req->cnt      = req->length;
2150        }
2151
2152        for (que = 0; que < ha->max_rsp_queues; que++) {
2153                rsp = ha->rsp_q_map[que];
2154                if (!rsp)
2155                        continue;
2156                /* Initialize response queue entries */
2157                if (IS_QLAFX00(ha))
2158                        qlafx00_init_response_q_entries(rsp);
2159                else
2160                        qla2x00_init_response_q_entries(rsp);
2161        }
2162
2163        ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
2164        ha->tgt.atio_ring_index = 0;
2165        /* Initialize ATIO queue entries */
2166        qlt_init_atio_q_entries(vha);
2167
2168        ha->isp_ops->config_rings(vha);
2169
2170        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2171
2172        ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
2173
2174        if (IS_QLAFX00(ha)) {
2175                rval = qlafx00_init_firmware(vha, ha->init_cb_size);
2176                goto next_check;
2177        }
2178
2179        /* Update any ISP specific firmware options before initialization. */
2180        ha->isp_ops->update_fw_options(vha);
2181
2182        if (ha->flags.npiv_supported) {
2183                if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
2184                        ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
2185                mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
2186        }
2187
2188        if (IS_FWI2_CAPABLE(ha)) {
2189                mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
2190                mid_init_cb->init_cb.execution_throttle =
2191                    cpu_to_le16(ha->fw_xcb_count);
2192        }
2193
2194        rval = qla2x00_init_firmware(vha, ha->init_cb_size);
2195next_check:
2196        if (rval) {
2197                ql_log(ql_log_fatal, vha, 0x00d2,
2198                    "Init Firmware **** FAILED ****.\n");
2199        } else {
2200                ql_dbg(ql_dbg_init, vha, 0x00d3,
2201                    "Init Firmware -- success.\n");
2202        }
2203
2204        return (rval);
2205}
2206
2207/**
2208 * qla2x00_fw_ready() - Waits for firmware ready.
2209 * @vha: HA context
2210 *
2211 * Returns 0 on success.
2212 */
2213static int
2214qla2x00_fw_ready(scsi_qla_host_t *vha)
2215{
2216        int             rval;
2217        unsigned long   wtime, mtime, cs84xx_time;
2218        uint16_t        min_wait;       /* Minimum wait time if loop is down */
2219        uint16_t        wait_time;      /* Wait time if loop is coming ready */
2220        uint16_t        state[5];
2221        struct qla_hw_data *ha = vha->hw;
2222
2223        if (IS_QLAFX00(vha->hw))
2224                return qlafx00_fw_ready(vha);
2225
2226        rval = QLA_SUCCESS;
2227
2228        /* 20 seconds for loop down. */
2229        min_wait = 20;
2230
2231        /*
2232         * Firmware should take at most one RATOV to login, plus 5 seconds for
2233         * our own processing.
2234         */
2235        if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
2236                wait_time = min_wait;
2237        }
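        /*
         * For example, with retry_count = 8 and login_timeout = 4 (the
         * defaults seeded when NVRAM is invalid), wait_time is 37 seconds,
         * while a loop that never comes up is abandoned after min_wait.
         */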
2238
2239        /* Min wait time if loop down */
2240        mtime = jiffies + (min_wait * HZ);
2241
2242        /* wait time before firmware ready */
2243        wtime = jiffies + (wait_time * HZ);
2244
2245        /* Wait for ISP to finish LIP */
2246        if (!vha->flags.init_done)
2247                ql_log(ql_log_info, vha, 0x801e,
2248                    "Waiting for LIP to complete.\n");
2249
2250        do {
2251                memset(state, -1, sizeof(state));
2252                rval = qla2x00_get_firmware_state(vha, state);
2253                if (rval == QLA_SUCCESS) {
2254                        if (state[0] < FSTATE_LOSS_OF_SYNC) {
2255                                vha->device_flags &= ~DFLG_NO_CABLE;
2256                        }
2257                        if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
2258                                ql_dbg(ql_dbg_taskm, vha, 0x801f,
2259                                    "fw_state=%x 84xx=%x.\n", state[0],
2260                                    state[2]);
2261                                if ((state[2] & FSTATE_LOGGED_IN) &&
2262                                     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
2263                                        ql_dbg(ql_dbg_taskm, vha, 0x8028,
2264                                            "Sending verify iocb.\n");
2265
2266                                        cs84xx_time = jiffies;
2267                                        rval = qla84xx_init_chip(vha);
2268                                        if (rval != QLA_SUCCESS) {
2269                                                ql_log(ql_log_warn,
2270                                                    vha, 0x8007,
2271                                                    "Init chip failed.\n");
2272                                                break;
2273                                        }
2274
2275                                        /* Add time taken to initialize. */
2276                                        cs84xx_time = jiffies - cs84xx_time;
2277                                        wtime += cs84xx_time;
2278                                        mtime += cs84xx_time;
2279                                        ql_dbg(ql_dbg_taskm, vha, 0x8008,
2280                                            "Increasing wait time by %ld. "
2281                                            "New time %ld.\n", cs84xx_time,
2282                                            wtime);
2283                                }
2284                        } else if (state[0] == FSTATE_READY) {
2285                                ql_dbg(ql_dbg_taskm, vha, 0x8037,
2286                                    "F/W Ready - OK.\n");
2287
2288                                qla2x00_get_retry_cnt(vha, &ha->retry_count,
2289                                    &ha->login_timeout, &ha->r_a_tov);
2290
2291                                rval = QLA_SUCCESS;
2292                                break;
2293                        }
2294
2295                        rval = QLA_FUNCTION_FAILED;
2296
2297                        if (atomic_read(&vha->loop_down_timer) &&
2298                            state[0] != FSTATE_READY) {
2299                                /* Loop down. Timeout on min_wait for states
2300                                 * other than Wait for Login.
2301                                 */
2302                                if (time_after_eq(jiffies, mtime)) {
2303                                        ql_log(ql_log_info, vha, 0x8038,
2304                                            "Cable is unplugged...\n");
2305
2306                                        vha->device_flags |= DFLG_NO_CABLE;
2307                                        break;
2308                                }
2309                        }
2310                } else {
2311                        /* Mailbox cmd failed. Timeout on min_wait. */
2312                        if (time_after_eq(jiffies, mtime) ||
2313                                ha->flags.isp82xx_fw_hung)
2314                                break;
2315                }
2316
2317                if (time_after_eq(jiffies, wtime))
2318                        break;
2319
2320                /* Delay for a while */
2321                msleep(500);
2322        } while (1);
2323
2324        ql_dbg(ql_dbg_taskm, vha, 0x803a,
2325            "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
2326            state[1], state[2], state[3], state[4], jiffies);
2327
2328        if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
2329                ql_log(ql_log_warn, vha, 0x803b,
2330                    "Firmware ready **** FAILED ****.\n");
2331        }
2332
2333        return (rval);
2334}
2335
2336/*
2337*  qla2x00_configure_hba
2338*      Setup adapter context.
2339*
2340* Input:
2341*      vha = adapter state pointer.
2342*
2343* Returns:
2344*      0 = success
2345*
2346* Context:
2347*      Kernel context.
2348*/
2349static int
2350qla2x00_configure_hba(scsi_qla_host_t *vha)
2351{
2352        int       rval;
2353        uint16_t      loop_id;
2354        uint16_t      topo;
2355        uint16_t      sw_cap;
2356        uint8_t       al_pa;
2357        uint8_t       area;
2358        uint8_t       domain;
2359        char            connect_type[22];
2360        struct qla_hw_data *ha = vha->hw;
2361        unsigned long flags;
2362        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
2363
2364        /* Get host addresses. */
2365        rval = qla2x00_get_adapter_id(vha,
2366            &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
2367        if (rval != QLA_SUCCESS) {
2368                if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
2369                    IS_CNA_CAPABLE(ha) ||
2370                    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
2371                        ql_dbg(ql_dbg_disc, vha, 0x2008,
2372                            "Loop is in a transition state.\n");
2373                } else {
2374                        ql_log(ql_log_warn, vha, 0x2009,
2375                            "Unable to get host loop ID.\n");
2376                        if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
2377                            (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
2378                                ql_log(ql_log_warn, vha, 0x1151,
2379                                    "Doing link init.\n");
2380                                if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
2381                                        return rval;
2382                        }
2383                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2384                }
2385                return (rval);
2386        }
2387
2388        if (topo == 4) {
2389                ql_log(ql_log_info, vha, 0x200a,
2390                    "Cannot get topology - retrying.\n");
2391                return (QLA_FUNCTION_FAILED);
2392        }
2393
2394        vha->loop_id = loop_id;
2395
2396        /* initialize */
2397        ha->min_external_loopid = SNS_FIRST_LOOP_ID;
2398        ha->operating_mode = LOOP;
2399        ha->switch_cap = 0;
2400
2401        switch (topo) {
2402        case 0:
2403                ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
2404                ha->current_topology = ISP_CFG_NL;
2405                strcpy(connect_type, "(Loop)");
2406                break;
2407
2408        case 1:
2409                ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2410                ha->switch_cap = sw_cap;
2411                ha->current_topology = ISP_CFG_FL;
2412                strcpy(connect_type, "(FL_Port)");
2413                break;
2414
2415        case 2:
2416                ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
2417                ha->operating_mode = P2P;
2418                ha->current_topology = ISP_CFG_N;
2419                strcpy(connect_type, "(N_Port-to-N_Port)");
2420                break;
2421
2422        case 3:
2423                ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2424                ha->switch_cap = sw_cap;
2425                ha->operating_mode = P2P;
2426                ha->current_topology = ISP_CFG_F;
2427                strcpy(connect_type, "(F_Port)");
2428                break;
2429
2430        default:
2431                ql_dbg(ql_dbg_disc, vha, 0x200f,
2432                    "HBA in unknown topology %x, using NL.\n", topo);
2433                ha->current_topology = ISP_CFG_NL;
2434                strcpy(connect_type, "(Loop)");
2435                break;
2436        }
2437
2438        /* Save Host port and loop ID. */
2439        /* byte order - Big Endian */
2440        vha->d_id.b.domain = domain;
2441        vha->d_id.b.area = area;
2442        vha->d_id.b.al_pa = al_pa;
2443
2444        spin_lock_irqsave(&ha->vport_slock, flags);
2445        qlt_update_vp_map(vha, SET_AL_PA);
2446        spin_unlock_irqrestore(&ha->vport_slock, flags);
2447
2448        if (!vha->flags.init_done)
2449                ql_log(ql_log_info, vha, 0x2010,
2450                    "Topology - %s, Host Loop address 0x%x.\n",
2451                    connect_type, vha->loop_id);
2452
2453        return(rval);
2454}
2455
2456inline void
2457qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
2458        char *def)
2459{
2460        char *st, *en;
2461        uint16_t index;
2462        struct qla_hw_data *ha = vha->hw;
2463        int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
2464            !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
2465
2466        if (memcmp(model, BINZERO, len) != 0) {
2467                strncpy(ha->model_number, model, len);
2468                st = en = ha->model_number;
2469                en += len - 1;
2470                while (en > st) {
2471                        if (*en != 0x20 && *en != 0x00)
2472                                break;
2473                        *en-- = '\0';
2474                }
2475
2476                index = (ha->pdev->subsystem_device & 0xff);
2477                if (use_tbl &&
2478                    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2479                    index < QLA_MODEL_NAMES)
2480                        strncpy(ha->model_desc,
2481                            qla2x00_model_name[index * 2 + 1],
2482                            sizeof(ha->model_desc) - 1);
2483        } else {
2484                index = (ha->pdev->subsystem_device & 0xff);
2485                if (use_tbl &&
2486                    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2487                    index < QLA_MODEL_NAMES) {
2488                        strcpy(ha->model_number,
2489                            qla2x00_model_name[index * 2]);
2490                        strncpy(ha->model_desc,
2491                            qla2x00_model_name[index * 2 + 1],
2492                            sizeof(ha->model_desc) - 1);
2493                } else {
2494                        strcpy(ha->model_number, def);
2495                }
2496        }
2497        if (IS_FWI2_CAPABLE(ha))
2498                qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
2499                    sizeof(ha->model_desc));
2500}
2501
2502/* On sparc systems, obtain port and node WWN from firmware
2503 * properties.
2504 */
2505static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
2506{
2507#ifdef CONFIG_SPARC
2508        struct qla_hw_data *ha = vha->hw;
2509        struct pci_dev *pdev = ha->pdev;
2510        struct device_node *dp = pci_device_to_OF_node(pdev);
2511        const u8 *val;
2512        int len;
2513
2514        val = of_get_property(dp, "port-wwn", &len);
2515        if (val && len >= WWN_SIZE)
2516                memcpy(nv->port_name, val, WWN_SIZE);
2517
2518        val = of_get_property(dp, "node-wwn", &len);
2519        if (val && len >= WWN_SIZE)
2520                memcpy(nv->node_name, val, WWN_SIZE);
2521#endif
2522}
2523
2524/*
2525* NVRAM configuration for ISP 2xxx
2526*
2527* Input:
2528*      vha               = adapter block pointer.
2529*
2530* Output:
2531*      initialization control block in response_ring
2532*      host adapters parameters in host adapter block
2533*
2534* Returns:
2535*      0 = success.
2536*/
2537int
2538qla2x00_nvram_config(scsi_qla_host_t *vha)
2539{
2540        int             rval;
2541        uint8_t         chksum = 0;
2542        uint16_t        cnt;
2543        uint8_t         *dptr1, *dptr2;
2544        struct qla_hw_data *ha = vha->hw;
2545        init_cb_t       *icb = ha->init_cb;
2546        nvram_t         *nv = ha->nvram;
2547        uint8_t         *ptr = ha->nvram;
2548        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2549
2550        rval = QLA_SUCCESS;
2551
2552        /* Determine NVRAM starting address. */
2553        ha->nvram_size = sizeof(nvram_t);
2554        ha->nvram_base = 0;
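        /*
         * Dual-function boards keep a second NVRAM image at word offset
         * 0x80; the ctrl_status read below appears to identify the PCI
         * function and selects that image for function 1.
         */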
2555        if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
2556                if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
2557                        ha->nvram_base = 0x80;
2558
2559        /* Get NVRAM data and calculate checksum. */
2560        ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
2561        for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2562                chksum += *ptr++;
2563
2564        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
2565            "Contents of NVRAM.\n");
2566        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
2567            (uint8_t *)nv, ha->nvram_size);
2568
2569        /* Bad NVRAM data, set default parameters. */
2570        if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2571            nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2572                /* Reset NVRAM data. */
2573                ql_log(ql_log_warn, vha, 0x0064,
2574                    "Inconsistent NVRAM "
2575                    "detected: checksum=0x%x id=%c version=0x%x.\n",
2576                    chksum, nv->id[0], nv->nvram_version);
2577                ql_log(ql_log_warn, vha, 0x0065,
2578                    "Falling back to "
2579                    "functioning (yet invalid -- WWPN) defaults.\n");
2580
2581                /*
2582                 * Set default initialization control block.
2583                 */
2584                memset(nv, 0, ha->nvram_size);
2585                nv->parameter_block_version = ICB_VERSION;
2586
2587                if (IS_QLA23XX(ha)) {
2588                        nv->firmware_options[0] = BIT_2 | BIT_1;
2589                        nv->firmware_options[1] = BIT_7 | BIT_5;
2590                        nv->add_firmware_options[0] = BIT_5;
2591                        nv->add_firmware_options[1] = BIT_5 | BIT_4;
2592                        nv->frame_payload_size = __constant_cpu_to_le16(2048);
2593                        nv->special_options[1] = BIT_7;
2594                } else if (IS_QLA2200(ha)) {
2595                        nv->firmware_options[0] = BIT_2 | BIT_1;
2596                        nv->firmware_options[1] = BIT_7 | BIT_5;
2597                        nv->add_firmware_options[0] = BIT_5;
2598                        nv->add_firmware_options[1] = BIT_5 | BIT_4;
2599                        nv->frame_payload_size = __constant_cpu_to_le16(1024);
2600                } else if (IS_QLA2100(ha)) {
2601                        nv->firmware_options[0] = BIT_3 | BIT_1;
2602                        nv->firmware_options[1] = BIT_5;
2603                        nv->frame_payload_size = __constant_cpu_to_le16(1024);
2604                }
2605
2606                nv->max_iocb_allocation = __constant_cpu_to_le16(256);
2607                nv->execution_throttle = __constant_cpu_to_le16(16);
2608                nv->retry_count = 8;
2609                nv->retry_delay = 1;
2610
2611                nv->port_name[0] = 33;
2612                nv->port_name[3] = 224;
2613                nv->port_name[4] = 139;
2614
2615                qla2xxx_nvram_wwn_from_ofw(vha, nv);
2616
2617                nv->login_timeout = 4;
2618
2619                /*
2620                 * Set default host adapter parameters
2621                 */
2622                nv->host_p[1] = BIT_2;
2623                nv->reset_delay = 5;
2624                nv->port_down_retry_count = 8;
2625                nv->max_luns_per_target = __constant_cpu_to_le16(8);
2626                nv->link_down_timeout = 60;
2627
2628                rval = 1;
2629        }
2630
2631#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2632        /*
2633         * The SN2 does not provide BIOS emulation which means you can't change
2634         * potentially bogus BIOS settings. Force the use of default settings
2635         * for link rate and frame size.  Hope that the rest of the settings
2636         * are valid.
2637         */
2638        if (ia64_platform_is("sn2")) {
2639                nv->frame_payload_size = __constant_cpu_to_le16(2048);
2640                if (IS_QLA23XX(ha))
2641                        nv->special_options[1] = BIT_7;
2642        }
2643#endif
2644
2645        /* Reset Initialization control block */
2646        memset(icb, 0, ha->init_cb_size);
2647
2648        /*
2649         * Setup driver NVRAM options.
2650         */
2651        nv->firmware_options[0] |= (BIT_6 | BIT_1);
2652        nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
2653        nv->firmware_options[1] |= (BIT_5 | BIT_0);
2654        nv->firmware_options[1] &= ~BIT_4;
2655
2656        if (IS_QLA23XX(ha)) {
2657                nv->firmware_options[0] |= BIT_2;
2658                nv->firmware_options[0] &= ~BIT_3;
2659                nv->special_options[0] &= ~BIT_6;
2660                nv->add_firmware_options[1] |= BIT_5 | BIT_4;
2661
2662                if (IS_QLA2300(ha)) {
2663                        if (ha->fb_rev == FPM_2310) {
2664                                strcpy(ha->model_number, "QLA2310");
2665                        } else {
2666                                strcpy(ha->model_number, "QLA2300");
2667                        }
2668                } else {
2669                        qla2x00_set_model_info(vha, nv->model_number,
2670                            sizeof(nv->model_number), "QLA23xx");
2671                }
2672        } else if (IS_QLA2200(ha)) {
2673                nv->firmware_options[0] |= BIT_2;
2674                /*
2675                 * 'Point-to-point preferred, else loop' is not a safe
2676                 * connection mode setting.
2677                 */
2678                if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
2679                    (BIT_5 | BIT_4)) {
2680                        /* Force 'loop preferred, else point-to-point'. */
2681                        nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
2682                        nv->add_firmware_options[0] |= BIT_5;
2683                }
2684                strcpy(ha->model_number, "QLA22xx");
2685        } else /*if (IS_QLA2100(ha))*/ {
2686                strcpy(ha->model_number, "QLA2100");
2687        }
2688
2689        /*
2690         * Copy over NVRAM RISC parameter block to initialization control block.
2691         */
2692        dptr1 = (uint8_t *)icb;
2693        dptr2 = (uint8_t *)&nv->parameter_block_version;
2694        cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
2695        while (cnt--)
2696                *dptr1++ = *dptr2++;
2697
2698        /* Copy 2nd half. */
2699        dptr1 = (uint8_t *)icb->add_firmware_options;
2700        cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
2701        while (cnt--)
2702                *dptr1++ = *dptr2++;
2703
2704        /* Use alternate WWN? */
2705        if (nv->host_p[1] & BIT_7) {
2706                memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
2707                memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
2708        }
2709
2710        /* Prepare nodename */
2711        if ((icb->firmware_options[1] & BIT_6) == 0) {
2712                /*
2713                 * Firmware will apply the following mask if the nodename was
2714                 * not provided.
2715                 */
2716                memcpy(icb->node_name, icb->port_name, WWN_SIZE);
2717                icb->node_name[0] &= 0xF0;
2718        }
2719
2720        /*
2721         * Set host adapter parameters.
2722         */
2723
2724        /*
2725         * BIT_7 in the host-parameters section allows for modification to
2726         * internal driver logging.
2727         */
2728        if (nv->host_p[0] & BIT_7)
2729                ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
2730        ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2731        /* Always load RISC code on non ISP2[12]00 chips. */
2732        if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
2733                ha->flags.disable_risc_code_load = 0;
2734        ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
2735        ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
2736        ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
2737        ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
2738        ha->flags.disable_serdes = 0;
2739
2740        ha->operating_mode =
2741            (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
2742
2743        memcpy(ha->fw_seriallink_options, nv->seriallink_options,
2744            sizeof(ha->fw_seriallink_options));
2745
2746        /* save HBA serial number */
2747        ha->serial0 = icb->port_name[5];
2748        ha->serial1 = icb->port_name[6];
2749        ha->serial2 = icb->port_name[7];
2750        memcpy(vha->node_name, icb->node_name, WWN_SIZE);
2751        memcpy(vha->port_name, icb->port_name, WWN_SIZE);
2752
2753        icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
2754
2755        ha->retry_count = nv->retry_count;
2756
2757        /* Set minimum login_timeout to 4 seconds. */
2758        if (nv->login_timeout != ql2xlogintimeout)
2759                nv->login_timeout = ql2xlogintimeout;
2760        if (nv->login_timeout < 4)
2761                nv->login_timeout = 4;
2762        ha->login_timeout = nv->login_timeout;
2763        icb->login_timeout = nv->login_timeout;
2764
2765        /* Set minimum RATOV to 100 tenths of a second. */
2766        ha->r_a_tov = 100;
2767
2768        ha->loop_reset_delay = nv->reset_delay;
2769
2770        /* Link Down Timeout = 0:
2771         *
2772         *      When Port Down timer expires we will start returning
2773         *      I/Os to the OS with "DID_NO_CONNECT".
2774         *
2775         * Link Down Timeout != 0:
2776         *
2777         *       The driver waits for the link to come up after link down
2778         *       before returning I/Os to OS with "DID_NO_CONNECT".
2779         */
2780        if (nv->link_down_timeout == 0) {
2781                ha->loop_down_abort_time =
2782                    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
2783        } else {
2784                ha->link_down_timeout =  nv->link_down_timeout;
2785                ha->loop_down_abort_time =
2786                    (LOOP_DOWN_TIME - ha->link_down_timeout);
2787        }
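        /*
         * For example, the invalid-NVRAM default above sets
         * link_down_timeout to 60, giving a loop_down_abort_time of
         * LOOP_DOWN_TIME - 60; a zero timeout uses LOOP_DOWN_TIMEOUT instead.
         */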
2788
2789        /*
2790         * Need enough time to try and get the port back.
2791         */
2792        ha->port_down_retry_count = nv->port_down_retry_count;
2793        if (qlport_down_retry)
2794                ha->port_down_retry_count = qlport_down_retry;
2795        /* Set login_retry_count */
2796        ha->login_retry_count  = nv->retry_count;
2797        if (ha->port_down_retry_count == nv->port_down_retry_count &&
2798            ha->port_down_retry_count > 3)
2799                ha->login_retry_count = ha->port_down_retry_count;
2800        else if (ha->port_down_retry_count > (int)ha->login_retry_count)
2801                ha->login_retry_count = ha->port_down_retry_count;
2802        if (ql2xloginretrycount)
2803                ha->login_retry_count = ql2xloginretrycount;
2804
2805        icb->lun_enables = __constant_cpu_to_le16(0);
2806        icb->command_resource_count = 0;
2807        icb->immediate_notify_resource_count = 0;
2808        icb->timeout = __constant_cpu_to_le16(0);
2809
2810        if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2811                /* Enable RIO */
2812                icb->firmware_options[0] &= ~BIT_3;
2813                icb->add_firmware_options[0] &=
2814                    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2815                icb->add_firmware_options[0] |= BIT_2;
2816                icb->response_accumulation_timer = 3;
2817                icb->interrupt_delay_timer = 5;
2818
2819                vha->flags.process_response_queue = 1;
2820        } else {
2821                /* Enable ZIO. */
2822                if (!vha->flags.init_done) {
2823                        ha->zio_mode = icb->add_firmware_options[0] &
2824                            (BIT_3 | BIT_2 | BIT_1 | BIT_0);
2825                        ha->zio_timer = icb->interrupt_delay_timer ?
2826                            icb->interrupt_delay_timer: 2;
2827                }
2828                icb->add_firmware_options[0] &=
2829                    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2830                vha->flags.process_response_queue = 0;
2831                if (ha->zio_mode != QLA_ZIO_DISABLED) {
2832                        ha->zio_mode = QLA_ZIO_MODE_6;
2833
2834                        ql_log(ql_log_info, vha, 0x0068,
2835                            "ZIO mode %d enabled; timer delay (%d us).\n",
2836                            ha->zio_mode, ha->zio_timer * 100);
2837
2838                        icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
2839                        icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
2840                        vha->flags.process_response_queue = 1;
2841                }
2842        }
2843
2844        if (rval) {
2845                ql_log(ql_log_warn, vha, 0x0069,
2846                    "NVRAM configuration failed.\n");
2847        }
2848        return (rval);
2849}
2850
2851static void
2852qla2x00_rport_del(void *data)
2853{
2854        fc_port_t *fcport = data;
2855        struct fc_rport *rport;
2856        scsi_qla_host_t *vha = fcport->vha;
2857        unsigned long flags;
2858
2859        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2860        rport = fcport->drport ? fcport->drport: fcport->rport;
2861        fcport->drport = NULL;
2862        spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2863        if (rport) {
2864                fc_remote_port_delete(rport);
2865                /*
2866                 * Release the target mode FC NEXUS in qla_target.c code
2867                 * if target mode is enabled.
2868                 */
2869                qlt_fc_port_deleted(vha, fcport);
2870        }
2871}
2872
2873/**
2874 * qla2x00_alloc_fcport() - Allocate a generic fcport.
2875 * @vha: HA context
2876 * @flags: allocation flags
2877 *
2878 * Returns a pointer to the allocated fcport, or NULL, if none available.
2879 */
2880fc_port_t *
2881qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2882{
2883        fc_port_t *fcport;
2884
2885        fcport = kzalloc(sizeof(fc_port_t), flags);
2886        if (!fcport)
2887                return NULL;
2888
2889        /* Setup fcport template structure. */
2890        fcport->vha = vha;
2891        fcport->port_type = FCT_UNKNOWN;
2892        fcport->loop_id = FC_NO_LOOP_ID;
2893        qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
2894        fcport->supported_classes = FC_COS_UNSPECIFIED;
2895
2896        return fcport;
2897}
2898
2899/*
2900 * qla2x00_configure_loop
2901 *      Updates Fibre Channel Device Database with what is actually on loop.
2902 *
2903 * Input:
2904 *      vha               = adapter block pointer.
2905 *
2906 * Returns:
2907 *      0 = success.
2908 *      1 = error.
2909 *      2 = database was full and device was not configured.
2910 */
2911static int
2912qla2x00_configure_loop(scsi_qla_host_t *vha)
2913{
2914        int  rval;
2915        unsigned long flags, save_flags;
2916        struct qla_hw_data *ha = vha->hw;
2917        rval = QLA_SUCCESS;
2918
2919        /* Get Initiator ID */
2920        if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2921                rval = qla2x00_configure_hba(vha);
2922                if (rval != QLA_SUCCESS) {
2923                        ql_dbg(ql_dbg_disc, vha, 0x2013,
2924                            "Unable to configure HBA.\n");
2925                        return (rval);
2926                }
2927        }
2928
2929        save_flags = flags = vha->dpc_flags;
2930        ql_dbg(ql_dbg_disc, vha, 0x2014,
2931            "Configure loop -- dpc flags = 0x%lx.\n", flags);
2932
2933        /*
2934         * If we have both an RSCN and PORT UPDATE pending then handle them
2935         * both at the same time.
2936         */
2937        clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2938        clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2939
2940        qla2x00_get_data_rate(vha);
2941
2942        /* Determine what we need to do */
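            /*
             * ISP_CFG_FL (public loop behind an FL_Port) keeps the local
             * loop scan and adds a fabric (RSCN) scan; ISP_CFG_F (fabric
             * point-to-point) needs only the fabric scan; ISP_CFG_N
             * (N_Port-to-N_Port) needs no fabric scan.  If the port is
             * offline or an ISP abort is active, both scans are redone.
             */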
2943        if (ha->current_topology == ISP_CFG_FL &&
2944            (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2945
2946                set_bit(RSCN_UPDATE, &flags);
2947
2948        } else if (ha->current_topology == ISP_CFG_F &&
2949            (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2950
2951                set_bit(RSCN_UPDATE, &flags);
2952                clear_bit(LOCAL_LOOP_UPDATE, &flags);
2953
2954        } else if (ha->current_topology == ISP_CFG_N) {
2955                clear_bit(RSCN_UPDATE, &flags);
2956
2957        } else if (!vha->flags.online ||
2958            (test_bit(ABORT_ISP_ACTIVE, &flags))) {
2959
2960                set_bit(RSCN_UPDATE, &flags);
2961                set_bit(LOCAL_LOOP_UPDATE, &flags);
2962        }
2963
2964        if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2965                if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2966                        ql_dbg(ql_dbg_disc, vha, 0x2015,
2967                            "Loop resync needed, failing.\n");
2968                        rval = QLA_FUNCTION_FAILED;
2969                } else
2970                        rval = qla2x00_configure_local_loop(vha);
2971        }
2972
2973        if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2974                if (LOOP_TRANSITION(vha)) {
2975                        ql_dbg(ql_dbg_disc, vha, 0x201e,
2976                            "Needs RSCN update and loop transition.\n");
2977                        rval = QLA_FUNCTION_FAILED;
2978                }
2979                else
2980                        rval = qla2x00_configure_fabric(vha);
2981        }
2982
2983        if (rval == QLA_SUCCESS) {
2984                if (atomic_read(&vha->loop_down_timer) ||
2985                    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2986                        rval = QLA_FUNCTION_FAILED;
2987                } else {
2988                        atomic_set(&vha->loop_state, LOOP_READY);
2989                        ql_dbg(ql_dbg_disc, vha, 0x2069,
2990                            "LOOP READY.\n");
2991                }
2992        }
2993
2994        if (rval) {
2995                ql_dbg(ql_dbg_disc, vha, 0x206a,
2996                    "%s *** FAILED ***.\n", __func__);
2997        } else {
2998                ql_dbg(ql_dbg_disc, vha, 0x206b,
2999                    "%s: exiting normally.\n", __func__);
3000        }
3001
3002        /* Restore state if a resync event occurred during processing */
3003        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
3004                if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
3005                        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3006                if (test_bit(RSCN_UPDATE, &save_flags)) {
3007                        set_bit(RSCN_UPDATE, &vha->dpc_flags);
3008                }
3009        }
3010
3011        return (rval);
3012}
3013
3014
3015
3016/*
3017 * qla2x00_configure_local_loop
3018 *      Updates Fibre Channel Device Database with local loop devices.
3019 *
3020 * Input:
3021 *      ha = adapter block pointer.
3022 *
3023 * Returns:
3024 *      0 = success.
3025 */
3026static int
3027qla2x00_configure_local_loop(scsi_qla_host_t *vha)
3028{
3029        int             rval, rval2;
3030        int             found_devs;
3031        int             found;
3032        fc_port_t       *fcport, *new_fcport;
3033
3034        uint16_t        index;
3035        uint16_t        entries;
3036        char            *id_iter;
3037        uint16_t        loop_id;
3038        uint8_t         domain, area, al_pa;
3039        struct qla_hw_data *ha = vha->hw;
3040
3041        found_devs = 0;
3042        new_fcport = NULL;
3043        entries = MAX_FIBRE_DEVICES_LOOP;
3044
3045        /* Get list of logged in devices. */
3046        memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
3047        rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
3048            &entries);
3049        if (rval != QLA_SUCCESS)
3050                goto cleanup_allocation;
3051
3052        ql_dbg(ql_dbg_disc, vha, 0x2017,
3053            "Entries in ID list (%d).\n", entries);
3054        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
3055            (uint8_t *)ha->gid_list,
3056            entries * sizeof(struct gid_list_info));
3057
3058        /* Allocate temporary fcport for any new fcports discovered. */
3059        new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3060        if (new_fcport == NULL) {
3061                ql_log(ql_log_warn, vha, 0x2018,
3062                    "Memory allocation failed for fcport.\n");
3063                rval = QLA_MEMORY_ALLOC_FAILED;
3064                goto cleanup_allocation;
3065        }
3066        new_fcport->flags &= ~FCF_FABRIC_DEVICE;
3067
3068        /*
3069         * Mark local devices that were present as FCS_DEVICE_LOST for now.
3070         */
3071        list_for_each_entry(fcport, &vha->vp_fcports, list) {
3072                if (atomic_read(&fcport->state) == FCS_ONLINE &&
3073                    fcport->port_type != FCT_BROADCAST &&
3074                    (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3075
3076                        ql_dbg(ql_dbg_disc, vha, 0x2019,
3077                            "Marking port lost loop_id=0x%04x.\n",
3078                            fcport->loop_id);
3079
3080                        qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
3081                }
3082        }
3083
3084        /* Add devices to port list. */
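            /*
             * ha->gid_list is the firmware's snapshot of logged-in loop
             * devices; the entry size (gid_list_info_size) and the loop-ID
             * width differ by ISP generation, so the list is walked with a
             * byte iterator rather than array indexing.
             */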
3085        id_iter = (char *)ha->gid_list;
3086        for (index = 0; index < entries; index++) {
3087                domain = ((struct gid_list_info *)id_iter)->domain;
3088                area = ((struct gid_list_info *)id_iter)->area;
3089                al_pa = ((struct gid_list_info *)id_iter)->al_pa;
3090                if (IS_QLA2100(ha) || IS_QLA2200(ha))
3091                        loop_id = (uint16_t)
3092                            ((struct gid_list_info *)id_iter)->loop_id_2100;
3093                else
3094                        loop_id = le16_to_cpu(
3095                            ((struct gid_list_info *)id_iter)->loop_id);
3096                id_iter += ha->gid_list_info_size;
3097
3098                /* Bypass reserved domain fields. */
3099                if ((domain & 0xf0) == 0xf0)
3100                        continue;
3101
3102                /* Bypass if not same domain and area of adapter. */
3103                if (area && domain &&
3104                    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
3105                        continue;
3106
3107                /* Bypass invalid local loop ID. */
3108                if (loop_id > LAST_LOCAL_LOOP_ID)
3109                        continue;
3110
3111                memset(new_fcport, 0, sizeof(fc_port_t));
3112
3113                /* Fill in member data. */
3114                new_fcport->d_id.b.domain = domain;
3115                new_fcport->d_id.b.area = area;
3116                new_fcport->d_id.b.al_pa = al_pa;
3117                new_fcport->loop_id = loop_id;
3118                rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
3119                if (rval2 != QLA_SUCCESS) {
3120                        ql_dbg(ql_dbg_disc, vha, 0x201a,
3121                            "Failed to retrieve fcport information "
3122                            "-- get_port_database=%x, loop_id=0x%04x.\n",
3123                            rval2, new_fcport->loop_id);
3124                        ql_dbg(ql_dbg_disc, vha, 0x201b,
3125                            "Scheduling resync.\n");
3126                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3127                        continue;
3128                }
3129
3130                /* Check for matching device in port list. */
3131                found = 0;
3132                fcport = NULL;
3133                list_for_each_entry(fcport, &vha->vp_fcports, list) {
3134                        if (memcmp(new_fcport->port_name, fcport->port_name,
3135                            WWN_SIZE))
3136                                continue;
3137
3138                        fcport->flags &= ~FCF_FABRIC_DEVICE;
3139                        fcport->loop_id = new_fcport->loop_id;
3140                        fcport->port_type = new_fcport->port_type;
3141                        fcport->d_id.b24 = new_fcport->d_id.b24;
3142                        memcpy(fcport->node_name, new_fcport->node_name,
3143                            WWN_SIZE);
3144
3145                        found++;
3146                        break;
3147                }
3148
3149                if (!found) {
3150                        /* New device, add to fcports list. */
3151                        list_add_tail(&new_fcport->list, &vha->vp_fcports);
3152
3153                        /* Allocate a new replacement fcport. */
3154                        fcport = new_fcport;
3155                        new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3156                        if (new_fcport == NULL) {
3157                                ql_log(ql_log_warn, vha, 0x201c,
3158                                    "Failed to allocate memory for fcport.\n");
3159                                rval = QLA_MEMORY_ALLOC_FAILED;
3160                                goto cleanup_allocation;
3161                        }
3162                        new_fcport->flags &= ~FCF_FABRIC_DEVICE;
3163                }
3164
3165                /* Base iIDMA settings on HBA port speed. */
3166                fcport->fp_speed = ha->link_data_rate;
3167
3168                qla2x00_update_fcport(vha, fcport);
3169
3170                found_devs++;
3171        }
3172
3173cleanup_allocation:
3174        kfree(new_fcport);
3175
3176        if (rval != QLA_SUCCESS) {
3177                ql_dbg(ql_dbg_disc, vha, 0x201d,
3178                    "Configure local loop error exit: rval=%x.\n", rval);
3179        }
3180
3181        return (rval);
3182}
3183
3184static void
3185qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3186{
3187        int rval;
3188        uint16_t mb[4];
3189        struct qla_hw_data *ha = vha->hw;
3190
3191        if (!IS_IIDMA_CAPABLE(ha))
3192                return;
3193
3194        if (atomic_read(&fcport->state) != FCS_ONLINE)
3195                return;
3196
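            /*
             * Program iIDMA only when the remote port's negotiated speed is
             * known and no faster than the HBA's current link rate.
             */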
3197        if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
3198            fcport->fp_speed > ha->link_data_rate)
3199                return;
3200
3201        rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
3202            mb);
3203        if (rval != QLA_SUCCESS) {
3204                ql_dbg(ql_dbg_disc, vha, 0x2004,
3205                    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
3206                    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
3207        } else {
3208                ql_dbg(ql_dbg_disc, vha, 0x2005,
3209                    "iIDMA adjusted to %s GB/s on %8phN.\n",
3210                    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
3211                    fcport->port_name);
3212        }
3213}
3214
3215static void
3216qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
3217{
3218        struct fc_rport_identifiers rport_ids;
3219        struct fc_rport *rport;
3220        unsigned long flags;
3221
3222        qla2x00_rport_del(fcport);
3223
3224        rport_ids.node_name = wwn_to_u64(fcport->node_name);
3225        rport_ids.port_name = wwn_to_u64(fcport->port_name);
3226        rport_ids.port_id = fcport->d_id.b.domain << 16 |
3227            fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
3228        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
3229        fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
3230        if (!rport) {
3231                ql_log(ql_log_warn, vha, 0x2006,
3232                    "Unable to allocate fc remote port.\n");
3233                return;
3234        }
3235        /*
3236         * Create target mode FC NEXUS in qla_target.c if target mode is
3237         * enabled.
3238         */
3239        qlt_fc_port_added(vha, fcport);
3240
3241        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
3242        *((fc_port_t **)rport->dd_data) = fcport;
3243        spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
3244
3245        rport->supported_classes = fcport->supported_classes;
3246
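            /*
             * The rport was registered above with an unknown role; report
             * the real initiator/target role afterwards via
             * fc_remote_port_rolechg() so the FC transport can react (e.g.
             * scan a newly found target) once the role is known.
             */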
3247        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
3248        if (fcport->port_type == FCT_INITIATOR)
3249                rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
3250        if (fcport->port_type == FCT_TARGET)
3251                rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
3252        fc_remote_port_rolechg(rport, rport_ids.roles);
3253}
3254
3255/*
3256 * qla2x00_update_fcport
3257 *      Updates device on list.
3258 *
3259 * Input:
3260 *      ha = adapter block pointer.
3261 *      fcport = port structure pointer.
3262 *
3263 * Return:
3264 *      None; the fcport is marked online and registered with the
3265 *      FC transport as a side effect.
3266 *
3267 * Context:
3268 *      Kernel context.
3269 */
3270void
3271qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3272{
3273        fcport->vha = vha;
3274
3275        if (IS_QLAFX00(vha->hw)) {
3276                qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3277                qla2x00_reg_remote_port(vha, fcport);
3278                return;
3279        }
3280        fcport->login_retry = 0;
3281        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
3282
3283        qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3284        qla2x00_iidma_fcport(vha, fcport);
3285        qla24xx_update_fcport_fcp_prio(vha, fcport);
3286        qla2x00_reg_remote_port(vha, fcport);
3287}
3288
3289/*
3290 * qla2x00_configure_fabric
3291 *      Set up SNS devices with loop IDs.
3292 *
3293 * Input:
3294 *      ha = adapter block pointer.
3295 *
3296 * Returns:
3297 *      0 = success.
3298 *      BIT_0 = error
3299 */
3300static int
3301qla2x00_configure_fabric(scsi_qla_host_t *vha)
3302{
3303        int     rval;
3304        fc_port_t       *fcport, *fcptemp;
3305        uint16_t        next_loopid;
3306        uint16_t        mb[MAILBOX_REGISTER_COUNT];
3307        uint16_t        loop_id;
3308        LIST_HEAD(new_fcports);
3309        struct qla_hw_data *ha = vha->hw;
3310        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3311
3312        /* If FL port exists, then SNS is present */
3313        if (IS_FWI2_CAPABLE(ha))
3314                loop_id = NPH_F_PORT;
3315        else
3316                loop_id = SNS_FL_PORT;
3317        rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
3318        if (rval != QLA_SUCCESS) {
3319                ql_dbg(ql_dbg_disc, vha, 0x201f,
3320                    "MBX_GET_PORT_NAME failed, No FL Port.\n");
3321
3322                vha->device_flags &= ~SWITCH_FOUND;
3323                return (QLA_SUCCESS);
3324        }
3325        vha->device_flags |= SWITCH_FOUND;
3326
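            /*
             * The do { ... } while (0) below is a breakable block: any
             * name-server or discovery failure breaks out so the leftover
             * entries on new_fcports can be freed in one place.
             */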
3327        do {
3328                /* FDMI support. */
3329                if (ql2xfdmienable &&
3330                    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
3331                        qla2x00_fdmi_register(vha);
3332
3333                /* Ensure we are logged into the SNS. */
3334                if (IS_FWI2_CAPABLE(ha))
3335                        loop_id = NPH_SNS;
3336                else
3337                        loop_id = SIMPLE_NAME_SERVER;
3338                rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
3339                    0xfc, mb, BIT_1|BIT_0);
3340                if (rval != QLA_SUCCESS) {
3341                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3342                        return rval;
3343                }
3344                if (mb[0] != MBS_COMMAND_COMPLETE) {
3345                        ql_dbg(ql_dbg_disc, vha, 0x2042,
3346                            "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
3347                            "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
3348                            mb[2], mb[6], mb[7]);
3349                        return (QLA_SUCCESS);
3350                }
3351
3352                if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
3353                        if (qla2x00_rft_id(vha)) {
3354                                /* EMPTY */
3355                                ql_dbg(ql_dbg_disc, vha, 0x2045,
3356                                    "Register FC-4 TYPE failed.\n");
3357                        }
3358                        if (qla2x00_rff_id(vha)) {
3359                                /* EMPTY */
3360                                ql_dbg(ql_dbg_disc, vha, 0x2049,
3361                                    "Register FC-4 Features failed.\n");
3362                        }
3363                        if (qla2x00_rnn_id(vha)) {
3364                                /* EMPTY */
3365                                ql_dbg(ql_dbg_disc, vha, 0x204f,
3366                                    "Register Node Name failed.\n");
3367                        } else if (qla2x00_rsnn_nn(vha)) {
3368                                /* EMPTY */
3369                                ql_dbg(ql_dbg_disc, vha, 0x2053,
3370                                    "Register Symbolic Node Name failed.\n");
3371                        }
3372                }
3373
3374#define QLA_FCPORT_SCAN         1
3375#define QLA_FCPORT_FOUND        2
3376
3377                list_for_each_entry(fcport, &vha->vp_fcports, list) {
3378                        fcport->scan_state = QLA_FCPORT_SCAN;
3379                }
3380
3381                rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3382                if (rval != QLA_SUCCESS)
3383                        break;
3384
3385                /*
3386                 * Logout all previous fabric devices marked lost, except
3387                 * FCP2 devices.
3388                 */
3389                list_for_each_entry(fcport, &vha->vp_fcports, list) {
3390                        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3391                                break;
3392
3393                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3394                                continue;
3395
3396                        if (fcport->scan_state == QLA_FCPORT_SCAN &&
3397                            atomic_read(&fcport->state) == FCS_ONLINE) {
3398                                qla2x00_mark_device_lost(vha, fcport,
3399                                    ql2xplogiabsentdevice, 0);
3400                                if (fcport->loop_id != FC_NO_LOOP_ID &&
3401                                    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3402                                    fcport->port_type != FCT_INITIATOR &&
3403                                    fcport->port_type != FCT_BROADCAST) {
3404                                        ha->isp_ops->fabric_logout(vha,
3405                                            fcport->loop_id,
3406                                            fcport->d_id.b.domain,
3407                                            fcport->d_id.b.area,
3408                                            fcport->d_id.b.al_pa);
3409                                        fcport->loop_id = FC_NO_LOOP_ID;
3410                                }
3411                        }
3412                }
3413
3414                /* Starting free loop ID. */
3415                next_loopid = ha->min_external_loopid;
3416
3417                /*
3418                 * Scan through our port list and log in any entries that
3419                 * still need to be logged in.
3420                 */
3421                list_for_each_entry(fcport, &vha->vp_fcports, list) {
3422                        if (atomic_read(&vha->loop_down_timer) ||
3423                            test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3424                                break;
3425
3426                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3427                            (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3428                                continue;
3429
3430                        if (fcport->loop_id == FC_NO_LOOP_ID) {
3431                                fcport->loop_id = next_loopid;
3432                                rval = qla2x00_find_new_loop_id(
3433                                    base_vha, fcport);
3434                                if (rval != QLA_SUCCESS) {
3435                                        /* Ran out of IDs to use */
3436                                        break;
3437                                }
3438                        }
3439                        /* Login and update database */
3440                        qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3441                }
3442
3443                /* Exit if out of loop IDs. */
3444                if (rval != QLA_SUCCESS) {
3445                        break;
3446                }
3447
3448                /*
3449                 * Login and add the new devices to our port list.
3450                 */
3451                list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3452                        if (atomic_read(&vha->loop_down_timer) ||
3453                            test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3454                                break;
3455
3456                        /* Find a new loop ID to use. */
3457                        fcport->loop_id = next_loopid;
3458                        rval = qla2x00_find_new_loop_id(base_vha, fcport);
3459                        if (rval != QLA_SUCCESS) {
3460                                /* Ran out of IDs to use */
3461                                break;
3462                        }
3463
3464                        /* Login and update database */
3465                        qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3466
3467                        list_move_tail(&fcport->list, &vha->vp_fcports);
3468                }
3469        } while (0);
3470
3471        /* Free all new device structures not processed. */
3472        list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3473                list_del(&fcport->list);
3474                kfree(fcport);
3475        }
3476
3477        if (rval) {
3478                ql_dbg(ql_dbg_disc, vha, 0x2068,
3479                    "Configure fabric error exit rval=%d.\n", rval);
3480        }
3481
3482        return (rval);
3483}
3484
3485/*
3486 * qla2x00_find_all_fabric_devs
3487 *
3488 * Input:
3489 *      ha = adapter block pointer.
3490 *      dev = database device entry pointer.
3491 *
3492 * Returns:
3493 *      0 = success.
3494 *
3495 * Context:
3496 *      Kernel context.
3497 */
3498static int
3499qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3500        struct list_head *new_fcports)
3501{
3502        int             rval;
3503        uint16_t        loop_id;
3504        fc_port_t       *fcport, *new_fcport, *fcptemp;
3505        int             found;
3506
3507        sw_info_t       *swl;
3508        int             swl_idx;
3509        int             first_dev, last_dev;
3510        port_id_t       wrap = {}, nxt_d_id;
3511        struct qla_hw_data *ha = vha->hw;
3512        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3513
3514        rval = QLA_SUCCESS;
3515
3516        /* Try GID_PT to get device list, else GAN. */
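            /*
             * GID_PT fetches all registered ports in a single query; GPN_ID,
             * GNN_ID, GPSC and GFF_ID then fill in port/node names, link
             * speed and FC-4 type per entry.  If any required query fails,
             * swl is dropped and discovery falls back to walking the fabric
             * one port at a time with GA_NXT.
             */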
3517        if (!ha->swl)
3518                ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
3519                    GFP_KERNEL);
3520        swl = ha->swl;
3521        if (!swl) {
3522                /*EMPTY*/
3523                ql_dbg(ql_dbg_disc, vha, 0x2054,
3524                    "GID_PT allocations failed, fallback on GA_NXT.\n");
3525        } else {
3526                memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
3527                if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3528                        swl = NULL;
3529                } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
3530                        swl = NULL;
3531                } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
3532                        swl = NULL;
3533                } else if (ql2xiidmaenable &&
3534                    qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
3535                        qla2x00_gpsc(vha, swl);
3536                }
3537
3538                /* If the other queries succeeded, probe for FC-4 type. */
3539                if (swl)
3540                        qla2x00_gff_id(vha, swl);
3541        }
3542        swl_idx = 0;
3543
3544        /* Allocate temporary fcport for any new fcports discovered. */
3545        new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3546        if (new_fcport == NULL) {
3547                ql_log(ql_log_warn, vha, 0x205e,
3548                    "Failed to allocate memory for fcport.\n");
3549                return (QLA_MEMORY_ALLOC_FAILED);
3550        }
3551        new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3552        /* Set start port ID scan at adapter ID. */
3553        first_dev = 1;
3554        last_dev = 0;
3555
3556        /* Starting free loop ID. */
3557        loop_id = ha->min_external_loopid;
3558        for (; loop_id <= ha->max_loop_id; loop_id++) {
3559                if (qla2x00_is_reserved_id(vha, loop_id))
3560                        continue;
3561
3562                if (ha->current_topology == ISP_CFG_FL &&
3563                    (atomic_read(&vha->loop_down_timer) ||
3564                     LOOP_TRANSITION(vha))) {
3565                        atomic_set(&vha->loop_down_timer, 0);
3566                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3567                        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3568                        break;
3569                }
3570
3571                if (swl != NULL) {
3572                        if (last_dev) {
3573                                wrap.b24 = new_fcport->d_id.b24;
3574                        } else {
3575                                new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
3576                                memcpy(new_fcport->node_name,
3577                                    swl[swl_idx].node_name, WWN_SIZE);
3578                                memcpy(new_fcport->port_name,
3579                                    swl[swl_idx].port_name, WWN_SIZE);
3580                                memcpy(new_fcport->fabric_port_name,
3581                                    swl[swl_idx].fabric_port_name, WWN_SIZE);
3582                                new_fcport->fp_speed = swl[swl_idx].fp_speed;
3583                                new_fcport->fc4_type = swl[swl_idx].fc4_type;
3584
3585                                if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
3586                                        last_dev = 1;
3587                                }
3588                                swl_idx++;
3589                        }
3590                } else {
3591                        /* Send GA_NXT to the switch */
3592                        rval = qla2x00_ga_nxt(vha, new_fcport);
3593                        if (rval != QLA_SUCCESS) {
3594                                ql_log(ql_log_warn, vha, 0x2064,
3595                                    "SNS scan failed -- assuming "
3596                                    "zero-entry result.\n");
3597                                list_for_each_entry_safe(fcport, fcptemp,
3598                                    new_fcports, list) {
3599                                        list_del(&fcport->list);
3600                                        kfree(fcport);
3601                                }
3602                                rval = QLA_SUCCESS;
3603                                break;
3604                        }
3605                }
3606
3607                /* If wrap on switch device list, exit. */
3608                if (first_dev) {
3609                        wrap.b24 = new_fcport->d_id.b24;
3610                        first_dev = 0;
3611                } else if (new_fcport->d_id.b24 == wrap.b24) {
3612                        ql_dbg(ql_dbg_disc, vha, 0x2065,
3613                            "Device wrap (%02x%02x%02x).\n",
3614                            new_fcport->d_id.b.domain,
3615                            new_fcport->d_id.b.area,
3616                            new_fcport->d_id.b.al_pa);
3617                        break;
3618                }
3619
3620                /* Bypass if same physical adapter. */
3621                if (new_fcport->d_id.b24 == base_vha->d_id.b24)
3622                        continue;
3623
3624                /* Bypass virtual ports of the same host. */
3625                if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
3626                        continue;
3627
3628                /* Bypass if same domain and area of adapter. */
3629                if (((new_fcport->d_id.b24 & 0xffff00) ==
3630                    (vha->d_id.b24 & 0xffff00)) &&
3631                    ha->current_topology == ISP_CFG_FL)
3632                        continue;
3633
3634                /* Bypass reserved domain fields. */
3635                if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
3636                        continue;
3637
3638                /* Bypass ports whose FCP-4 type is not FCP_SCSI */
3639                if (ql2xgffidenable &&
3640                    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3641                    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
3642                        continue;
3643
3644                /* Locate matching device in database. */
3645                found = 0;
3646                list_for_each_entry(fcport, &vha->vp_fcports, list) {
3647                        if (memcmp(new_fcport->port_name, fcport->port_name,
3648                            WWN_SIZE))
3649                                continue;
3650
3651                        fcport->scan_state = QLA_FCPORT_FOUND;
3652
3653                        found++;
3654
3655                        /* Update port state. */
3656                        memcpy(fcport->fabric_port_name,
3657                            new_fcport->fabric_port_name, WWN_SIZE);
3658                        fcport->fp_speed = new_fcport->fp_speed;
3659
3660                        /*
3661                         * If address the same and state FCS_ONLINE, nothing
3662                         * changed.
3663                         */
3664                        if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3665                            atomic_read(&fcport->state) == FCS_ONLINE) {
3666                                break;
3667                        }
3668
3669                        /*
3670                         * If device was not a fabric device before.
3671                         */
3672                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3673                                fcport->d_id.b24 = new_fcport->d_id.b24;
3674                                qla2x00_clear_loop_id(fcport);
3675                                fcport->flags |= (FCF_FABRIC_DEVICE |
3676                                    FCF_LOGIN_NEEDED);
3677                                break;
3678                        }
3679
3680                        /*
3681                         * Port ID changed or device was marked to be updated;
3682                         * Log it out if still logged in and mark it for
3683                         * relogin later.
3684                         */
3685                        fcport->d_id.b24 = new_fcport->d_id.b24;
3686                        fcport->flags |= FCF_LOGIN_NEEDED;
3687                        if (fcport->loop_id != FC_NO_LOOP_ID &&
3688                            (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3689                            (fcport->flags & FCF_ASYNC_SENT) == 0 &&
3690                            fcport->port_type != FCT_INITIATOR &&
3691                            fcport->port_type != FCT_BROADCAST) {
3692                                ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3693                                    fcport->d_id.b.domain, fcport->d_id.b.area,
3694                                    fcport->d_id.b.al_pa);
3695                                qla2x00_clear_loop_id(fcport);
3696                        }
3697
3698                        break;
3699                }
3700
3701                if (found)
3702                        continue;
3703                /* If device was not in our fcports list, then add it. */
3704                list_add_tail(&new_fcport->list, new_fcports);
3705
3706                /* Allocate a new replacement fcport. */
3707                nxt_d_id.b24 = new_fcport->d_id.b24;
3708                new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3709                if (new_fcport == NULL) {
3710                        ql_log(ql_log_warn, vha, 0x2066,
3711                            "Memory allocation failed for fcport.\n");
3712                        return (QLA_MEMORY_ALLOC_FAILED);
3713                }
3714                new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3715                new_fcport->d_id.b24 = nxt_d_id.b24;
3716        }
3717
3718        kfree(new_fcport);
3719
3720        return (rval);
3721}
3722
3723/*
3724 * qla2x00_find_new_loop_id
3725 *      Scan through our port list and find a new usable loop ID.
3726 *
3727 * Input:
3728 *      ha:     adapter state pointer.
3729 *      dev:    port structure pointer.
3730 *
3731 * Returns:
3732 *      qla2x00 local function return status code.
3733 *
3734 * Context:
3735 *      Kernel context.
3736 */
3737int
3738qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3739{
3740        int     rval;
3741        struct qla_hw_data *ha = vha->hw;
3742        unsigned long flags = 0;
3743
3744        rval = QLA_SUCCESS;
3745
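            /*
             * Loop IDs are allocated from the per-HBA loop_id_map bitmap
             * under vport_slock so concurrent vports cannot claim the same
             * ID; firmware-reserved IDs are rejected.
             */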
3746        spin_lock_irqsave(&ha->vport_slock, flags);
3747
3748        dev->loop_id = find_first_zero_bit(ha->loop_id_map,
3749            LOOPID_MAP_SIZE);
3750        if (dev->loop_id >= LOOPID_MAP_SIZE ||
3751            qla2x00_is_reserved_id(vha, dev->loop_id)) {
3752                dev->loop_id = FC_NO_LOOP_ID;
3753                rval = QLA_FUNCTION_FAILED;
3754        } else
3755                set_bit(dev->loop_id, ha->loop_id_map);
3756
3757        spin_unlock_irqrestore(&ha->vport_slock, flags);
3758
3759        if (rval == QLA_SUCCESS)
3760                ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
3761                    "Assigning new loopid=%x, portid=%x.\n",
3762                    dev->loop_id, dev->d_id.b24);
3763        else
3764                ql_log(ql_log_warn, dev->vha, 0x2087,
3765                    "No loop_id's available, portid=%x.\n",
3766                    dev->d_id.b24);
3767
3768        return (rval);
3769}
3770
3771/*
3772 * qla2x00_fabric_dev_login
3773 *      Login fabric target device and update FC port database.
3774 *
3775 * Input:
3776 *      ha:             adapter state pointer.
3777 *      fcport:         port structure list pointer.
3778 *      next_loopid:    contains value of a new loop ID that can be used
3779 *                      by the next login attempt.
3780 *
3781 * Returns:
3782 *      qla2x00 local function return status code.
3783 *
3784 * Context:
3785 *      Kernel context.
3786 */
3787static int
3788qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3789    uint16_t *next_loopid)
3790{
3791        int     rval;
3792        int     retry;
3793        uint8_t opts;
3794        struct qla_hw_data *ha = vha->hw;
3795
3796        rval = QLA_SUCCESS;
3797        retry = 0;
3798
3799        if (IS_ALOGIO_CAPABLE(ha)) {
3800                if (fcport->flags & FCF_ASYNC_SENT)
3801                        return rval;
3802                fcport->flags |= FCF_ASYNC_SENT;
3803                rval = qla2x00_post_async_login_work(vha, fcport, NULL);
3804                if (!rval)
3805                        return rval;
3806        }
3807
3808        fcport->flags &= ~FCF_ASYNC_SENT;
3809        rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3810        if (rval == QLA_SUCCESS) {
3811                /* Send an ADISC to FCP2 devices.*/
3812                opts = 0;
3813                if (fcport->flags & FCF_FCP2_DEVICE)
3814                        opts |= BIT_1;
3815                rval = qla2x00_get_port_database(vha, fcport, opts);
3816                if (rval != QLA_SUCCESS) {
3817                        ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3818                            fcport->d_id.b.domain, fcport->d_id.b.area,
3819                            fcport->d_id.b.al_pa);
3820                        qla2x00_mark_device_lost(vha, fcport, 1, 0);
3821                } else {
3822                        qla2x00_update_fcport(vha, fcport);
3823                }
3824        } else {
3825                /* Retry Login. */
3826                qla2x00_mark_device_lost(vha, fcport, 1, 0);
3827        }
3828
3829        return (rval);
3830}
3831
3832/*
3833 * qla2x00_fabric_login
3834 *      Issue fabric login command.
3835 *
3836 * Input:
3837 *      ha = adapter block pointer.
3838 *      device = pointer to FC device type structure.
3839 *
3840 * Returns:
3841 *      0 - Login successfully
3842 *      1 - Login failed
3843 *      2 - Initiator device
3844 *      3 - Fatal error
3845 */
3846int
3847qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3848    uint16_t *next_loopid)
3849{
3850        int     rval;
3851        int     retry;
3852        uint16_t tmp_loopid;
3853        uint16_t mb[MAILBOX_REGISTER_COUNT];
3854        struct qla_hw_data *ha = vha->hw;
3855
3856        retry = 0;
3857        tmp_loopid = 0;
3858
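            /*
             * Retry loop around the firmware fabric-login mailbox command.
             * mb[0] decides the next step: MBS_PORT_ID_USED means the port
             * is already logged in under another loop ID (adopt it and
             * retry), MBS_LOOP_ID_USED means our ID is taken (find a new
             * one), MBS_COMMAND_COMPLETE extracts role and class-of-service
             * bits, and any other status logs the port out and gives up.
             */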
3859        for (;;) {
3860                ql_dbg(ql_dbg_disc, vha, 0x2000,
3861                    "Trying Fabric Login w/loop id 0x%04x for port "
3862                    "%02x%02x%02x.\n",
3863                    fcport->loop_id, fcport->d_id.b.domain,
3864                    fcport->d_id.b.area, fcport->d_id.b.al_pa);
3865
3866                /* Login fcport on switch. */
3867                rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
3868                    fcport->d_id.b.domain, fcport->d_id.b.area,
3869                    fcport->d_id.b.al_pa, mb, BIT_0);
3870                if (rval != QLA_SUCCESS) {
3871                        return rval;
3872                }
3873                if (mb[0] == MBS_PORT_ID_USED) {
3874                        /*
3875                         * Device has another loop ID.  The firmware team
3876                         * recommends the driver perform an implicit login with
3877                         * the specified ID again. The ID we just used is saved
3878                         * here so we return an ID that can be tried by
3879                         * the next login.
3880                         */
3881                        retry++;
3882                        tmp_loopid = fcport->loop_id;
3883                        fcport->loop_id = mb[1];
3884
3885                        ql_dbg(ql_dbg_disc, vha, 0x2001,
3886                            "Fabric Login: port in use - next loop "
3887                            "id=0x%04x, port id= %02x%02x%02x.\n",
3888                            fcport->loop_id, fcport->d_id.b.domain,
3889                            fcport->d_id.b.area, fcport->d_id.b.al_pa);
3890
3891                } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3892                        /*
3893                         * Login succeeded.
3894                         */
3895                        if (retry) {
3896                                /* A retry occurred before. */
3897                                *next_loopid = tmp_loopid;
3898                        } else {
3899                                /*
3900                                 * No retry occurred before. Just increment the
3901                                 * ID value for next login.
3902                                 */
3903                                *next_loopid = (fcport->loop_id + 1);
3904                        }
3905
3906                        if (mb[1] & BIT_0) {
3907                                fcport->port_type = FCT_INITIATOR;
3908                        } else {
3909                                fcport->port_type = FCT_TARGET;
3910                                if (mb[1] & BIT_1) {
3911                                        fcport->flags |= FCF_FCP2_DEVICE;
3912                                }
3913                        }
3914
3915                        if (mb[10] & BIT_0)
3916                                fcport->supported_classes |= FC_COS_CLASS2;
3917                        if (mb[10] & BIT_1)
3918                                fcport->supported_classes |= FC_COS_CLASS3;
3919
3920                        if (IS_FWI2_CAPABLE(ha)) {
3921                                if (mb[10] & BIT_7)
3922                                        fcport->flags |=
3923                                            FCF_CONF_COMP_SUPPORTED;
3924                        }
3925
3926                        rval = QLA_SUCCESS;
3927                        break;
3928                } else if (mb[0] == MBS_LOOP_ID_USED) {
3929                        /*
3930                         * Loop ID already used, try next loop ID.
3931                         */
3932                        fcport->loop_id++;
3933                        rval = qla2x00_find_new_loop_id(vha, fcport);
3934                        if (rval != QLA_SUCCESS) {
3935                                /* Ran out of loop IDs to use */
3936                                break;
3937                        }
3938                } else if (mb[0] == MBS_COMMAND_ERROR) {
3939                        /*
3940                         * Firmware possibly timed out during login. If no
3941                         * retries are left then the device is declared
3942                         * dead.
3943                         */
3944                        *next_loopid = fcport->loop_id;
3945                        ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3946                            fcport->d_id.b.domain, fcport->d_id.b.area,
3947                            fcport->d_id.b.al_pa);
3948                        qla2x00_mark_device_lost(vha, fcport, 1, 0);
3949
3950                        rval = 1;
3951                        break;
3952                } else {
3953                        /*
3954                         * unrecoverable / not handled error
3955                         */
3956                        ql_dbg(ql_dbg_disc, vha, 0x2002,
3957                            "Failed=%x port_id=%02x%02x%02x loop_id=%x "
3958                            "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
3959                            fcport->d_id.b.area, fcport->d_id.b.al_pa,
3960                            fcport->loop_id, jiffies);
3961
3962                        *next_loopid = fcport->loop_id;
3963                        ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3964                            fcport->d_id.b.domain, fcport->d_id.b.area,
3965                            fcport->d_id.b.al_pa);
3966                        qla2x00_clear_loop_id(fcport);
3967                        fcport->login_retry = 0;
3968
3969                        rval = 3;
3970                        break;
3971                }
3972        }
3973
3974        return (rval);
3975}
3976
3977/*
3978 * qla2x00_local_device_login
3979 *      Issue local device login command.
3980 *
3981 * Input:
3982 *      ha = adapter block pointer.
3983 *      loop_id = loop id of device to login to.
3984 *
3985 * Returns (Where's the #define!!!!):
3986 *      0 - Login successfully
3987 *      1 - Login failed
3988 *      3 - Fatal error
3989 */
3990int
3991qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3992{
3993        int             rval;
3994        uint16_t        mb[MAILBOX_REGISTER_COUNT];
3995
3996        memset(mb, 0, sizeof(mb));
3997        rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3998        if (rval == QLA_SUCCESS) {
3999                /* Interrogate mailbox registers for any errors */
4000                if (mb[0] == MBS_COMMAND_ERROR)
4001                        rval = 1;
4002                else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
4003                        /* device not in PCB table */
4004                        rval = 3;
4005        }
4006
4007        return (rval);
4008}
4009
4010/*
4011 *  qla2x00_loop_resync
4012 *      Resync with fibre channel devices.
4013 *
4014 * Input:
4015 *      ha = adapter block pointer.
4016 *
4017 * Returns:
4018 *      0 = success
4019 */
4020int
4021qla2x00_loop_resync(scsi_qla_host_t *vha)
4022{
4023        int rval = QLA_SUCCESS;
4024        uint32_t wait_time;
4025        struct req_que *req;
4026        struct rsp_que *rsp;
4027
4028        if (vha->hw->flags.cpu_affinity_enabled)
4029                req = vha->hw->req_q_map[0];
4030        else
4031                req = vha->req;
4032        rsp = req->rsp;
4033
4034        clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4035        if (vha->flags.online) {
4036                if (!(rval = qla2x00_fw_ready(vha))) {
4037                        /* Wait at most MAX_TARGET RSCNs for a stable link. */
4038                        wait_time = 256;
4039                        do {
4040                                if (!IS_QLAFX00(vha->hw)) {
4041                                        /*
4042                                         * Issue a marker after FW becomes
4043                                         * ready.
4044                                         */
4045                                        qla2x00_marker(vha, req, rsp, 0, 0,
4046                                                MK_SYNC_ALL);
4047                                        vha->marker_needed = 0;
4048                                }
4049
4050                                /* Remap devices on Loop. */
4051                                clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4052
4053                                if (IS_QLAFX00(vha->hw))
4054                                        qlafx00_configure_devices(vha);
4055                                else
4056                                        qla2x00_configure_loop(vha);
4057
4058                                wait_time--;
4059                        } while (!atomic_read(&vha->loop_down_timer) &&
4060                                !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4061                                && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
4062                                &vha->dpc_flags)));
4063                }
4064        }
4065
4066        if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4067                return (QLA_FUNCTION_FAILED);
4068
4069        if (rval)
4070                ql_dbg(ql_dbg_disc, vha, 0x206c,
4071                    "%s *** FAILED ***.\n", __func__);
4072
4073        return (rval);
4074}
4075
4076/*
4077* qla2x00_perform_loop_resync
4078* Description: This function will set the appropriate flags and call
4079*              qla2x00_loop_resync. If successful, the loop will be resynced.
4080* Arguments : scsi_qla_host_t pointer
4081* return    : Success or Failure
4082*/
4083
4084int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
4085{
4086        int32_t rval = 0;
4087
4088        if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
4089                /* Configure the flags so that resync happens properly. */
4090                atomic_set(&ha->loop_down_timer, 0);
4091                if (!(ha->device_flags & DFLG_NO_CABLE)) {
4092                        atomic_set(&ha->loop_state, LOOP_UP);
4093                        set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
4094                        set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
4095                        set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
4096
4097                        rval = qla2x00_loop_resync(ha);
4098                } else
4099                        atomic_set(&ha->loop_state, LOOP_DEAD);
4100
4101                clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
4102        }
4103
4104        return rval;
4105}
4106
4107void
4108qla2x00_update_fcports(scsi_qla_host_t *base_vha)
4109{
4110        fc_port_t *fcport;
4111        struct scsi_qla_host *vha;
4112        struct qla_hw_data *ha = base_vha->hw;
4113        unsigned long flags;
4114
4115        spin_lock_irqsave(&ha->vport_slock, flags);
4116        /* Go with deferred removal of rport references. */
4117        list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
4118                atomic_inc(&vha->vref_count);
4119                list_for_each_entry(fcport, &vha->vp_fcports, list) {
4120                        if (fcport->drport &&
4121                            atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
4122                                spin_unlock_irqrestore(&ha->vport_slock, flags);
4123                                qla2x00_rport_del(fcport);
4124                                spin_lock_irqsave(&ha->vport_slock, flags);
4125                        }
4126                }
4127                atomic_dec(&vha->vref_count);
4128        }
4129        spin_unlock_irqrestore(&ha->vport_slock, flags);
4130}
4131
4132/* Assumes idc_lock always held on entry */
4133void
4134qla83xx_reset_ownership(scsi_qla_host_t *vha)
4135{
4136        struct qla_hw_data *ha = vha->hw;
4137        uint32_t drv_presence, drv_presence_mask;
4138        uint32_t dev_part_info1, dev_part_info2, class_type;
4139        uint32_t class_type_mask = 0x3;
4140        uint16_t fcoe_other_function = 0xffff, i;
4141
4142        if (IS_QLA8044(ha)) {
4143                drv_presence = qla8044_rd_direct(vha,
4144                    QLA8044_CRB_DRV_ACTIVE_INDEX);
4145                dev_part_info1 = qla8044_rd_direct(vha,
4146                    QLA8044_CRB_DEV_PART_INFO_INDEX);
4147                dev_part_info2 = qla8044_rd_direct(vha,
4148                    QLA8044_CRB_DEV_PART_INFO2);
4149        } else {
4150                qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4151                qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
4152                qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
4153        }
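            /*
             * dev_part_info1/2 pack one 4-bit class-type field per PCI
             * function: functions 0-7 in info1, 8-15 in info2.  Scan them
             * for another FCoE-class function on this adapter.
             */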
4154        for (i = 0; i < 8; i++) {
4155                class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
4156                if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
4157                    (i != ha->portnum)) {
4158                        fcoe_other_function = i;
4159                        break;
4160                }
4161        }
4162        if (fcoe_other_function == 0xffff) {
4163                for (i = 0; i < 8; i++) {
4164                        class_type = ((dev_part_info2 >> (i * 4)) &
4165                            class_type_mask);
4166                        if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
4167                            ((i + 8) != ha->portnum)) {
4168                                fcoe_other_function = i + 8;
4169                                break;
4170                        }
4171                }
4172        }
4173        /*
4174         * Prepare drv-presence mask based on fcoe functions present.
4175         * However consider only valid physical fcoe function numbers (0-15).
4176         */
4177        drv_presence_mask = ~((1 << (ha->portnum)) |
4178                        ((fcoe_other_function == 0xffff) ?
4179                         0 : (1 << (fcoe_other_function))));
4180
4181        /* We are the reset owner iff:
4182         *    - No other protocol drivers present.
4183         *    - This is the lowest among fcoe functions. */
4184        if (!(drv_presence & drv_presence_mask) &&
4185                        (ha->portnum < fcoe_other_function)) {
4186                ql_dbg(ql_dbg_p3p, vha, 0xb07f,
4187                    "This host is Reset owner.\n");
4188                ha->flags.nic_core_reset_owner = 1;
4189        }
4190}
4191
4192static int
4193__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
4194{
4195        int rval = QLA_SUCCESS;
4196        struct qla_hw_data *ha = vha->hw;
4197        uint32_t drv_ack;
4198
4199        rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
4200        if (rval == QLA_SUCCESS) {
4201                drv_ack |= (1 << ha->portnum);
4202                rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
4203        }
4204
4205        return rval;
4206}
4207
4208static int
4209__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
4210{
4211        int rval = QLA_SUCCESS;
4212        struct qla_hw_data *ha = vha->hw;
4213        uint32_t drv_ack;
4214
4215        rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
4216        if (rval == QLA_SUCCESS) {
4217                drv_ack &= ~(1 << ha->portnum);
4218                rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
4219        }
4220
4221        return rval;
4222}
4223
4224static const char *
4225qla83xx_dev_state_to_string(uint32_t dev_state)
4226{
4227        switch (dev_state) {
4228        case QLA8XXX_DEV_COLD:
4229                return "COLD/RE-INIT";
4230        case QLA8XXX_DEV_INITIALIZING:
4231                return "INITIALIZING";
4232        case QLA8XXX_DEV_READY:
4233                return "READY";
4234        case QLA8XXX_DEV_NEED_RESET:
4235                return "NEED RESET";
4236        case QLA8XXX_DEV_NEED_QUIESCENT:
4237                return "NEED QUIESCENT";
4238        case QLA8XXX_DEV_FAILED:
4239                return "FAILED";
4240        case QLA8XXX_DEV_QUIESCENT:
4241                return "QUIESCENT";
4242        default:
4243                return "Unknown";
4244        }
4245}
4246
4247/* Assumes idc-lock always held on entry */
4248void
4249qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
4250{
4251        struct qla_hw_data *ha = vha->hw;
4252        uint32_t idc_audit_reg = 0, duration_secs = 0;
4253
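            /*
             * Audit register layout as written here: port number in the low
             * bits, audit type at bit 7, and either the start timestamp or
             * the reset duration (both in seconds) from bit 8 upward.
             */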
4254        switch (audit_type) {
4255        case IDC_AUDIT_TIMESTAMP:
4256                ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
4257                idc_audit_reg = (ha->portnum) |
4258                    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
4259                qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
4260                break;
4261
4262        case IDC_AUDIT_COMPLETION:
4263                duration_secs = ((jiffies_to_msecs(jiffies) -
4264                    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
4265                idc_audit_reg = (ha->portnum) |
4266                    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
4267                qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
4268                break;
4269
4270        default:
4271                ql_log(ql_log_warn, vha, 0xb078,
4272                    "Invalid audit type specified.\n");
4273                break;
4274        }
4275}
4276
4277/* Assumes idc_lock always held on entry */
4278static int
4279qla83xx_initiating_reset(scsi_qla_host_t *vha)
4280{
4281        struct qla_hw_data *ha = vha->hw;
4282        uint32_t  idc_control, dev_state;
4283
4284        __qla83xx_get_idc_control(vha, &idc_control);
4285        if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
4286                ql_log(ql_log_info, vha, 0xb080,
4287                    "NIC Core reset has been disabled. idc-control=0x%x\n",
4288                    idc_control);
4289                return QLA_FUNCTION_FAILED;
4290        }
4291
4292        /* Set NEED-RESET iff in READY state and we are the reset-owner */
4293        qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4294        if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
4295                qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
4296                    QLA8XXX_DEV_NEED_RESET);
4297                ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
4298                qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
4299        } else {
4300                const char *state = qla83xx_dev_state_to_string(dev_state);
4301                ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
4302
4303                /* SV: XXX: Is timeout required here? */
4304                /* Wait for IDC state change READY -> NEED_RESET */
4305                while (dev_state == QLA8XXX_DEV_READY) {
4306                        qla83xx_idc_unlock(vha, 0);
4307                        msleep(200);
4308                        qla83xx_idc_lock(vha, 0);
4309                        qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4310                }
4311        }
4312
4313        /* Send IDC ack by writing to drv-ack register */
4314        __qla83xx_set_drv_ack(vha);
4315
4316        return QLA_SUCCESS;
4317}
4318
4319int
4320__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
4321{
4322        return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
4323}
4324
4325int
4326__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
4327{
4328        return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
4329}
4330
4331static int
4332qla83xx_check_driver_presence(scsi_qla_host_t *vha)
4333{
4334        uint32_t drv_presence = 0;
4335        struct qla_hw_data *ha = vha->hw;
4336
4337        qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4338        if (drv_presence & (1 << ha->portnum))
4339                return QLA_SUCCESS;
4340        else
4341                return QLA_TEST_FAILED;
4342}
4343
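    /*
     * Coordinate an ISP83xx NIC core reset through the IDC registers:
     * take the idc-lock, verify this function still participates in IDC,
     * determine reset ownership, then either drive the reset or wait for
     * the IDC state machine to reach READY/FAILED.
     */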
4344int
4345qla83xx_nic_core_reset(scsi_qla_host_t *vha)
4346{
4347        int rval = QLA_SUCCESS;
4348        struct qla_hw_data *ha = vha->hw;
4349
4350        ql_dbg(ql_dbg_p3p, vha, 0xb058,
4351            "Entered %s().\n", __func__);
4352
4353        if (vha->device_flags & DFLG_DEV_FAILED) {
4354                ql_log(ql_log_warn, vha, 0xb059,
4355                    "Device in unrecoverable FAILED state.\n");
4356                return QLA_FUNCTION_FAILED;
4357        }
4358
4359        qla83xx_idc_lock(vha, 0);
4360
4361        if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
4362                ql_log(ql_log_warn, vha, 0xb05a,
4363                    "Function=0x%x has been removed from IDC participation.\n",
4364                    ha->portnum);
4365                rval = QLA_FUNCTION_FAILED;
4366                goto exit;
4367        }
4368
4369        qla83xx_reset_ownership(vha);
4370
4371        rval = qla83xx_initiating_reset(vha);
4372
4373        /*
4374         * Perform reset if we are the reset-owner,
4375         * else wait till IDC state changes to READY/FAILED.
4376         */
4377        if (rval == QLA_SUCCESS) {
4378                rval = qla83xx_idc_state_handler(vha);
4379
4380                if (rval == QLA_SUCCESS)
4381                        ha->flags.nic_core_hung = 0;
4382                __qla83xx_clear_drv_ack(vha);
4383        }
4384
4385exit:
4386        qla83xx_idc_unlock(vha, 0);
4387
4388        ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
4389
4390        return rval;
4391}
4392
4393int
4394qla2xxx_mctp_dump(scsi_qla_host_t *vha)
4395{
4396        struct qla_hw_data *ha = vha->hw;
4397        int rval = QLA_FUNCTION_FAILED;
4398
4399        if (!IS_MCTP_CAPABLE(ha)) {
4400                /* This message can be removed from the final version */
4401                ql_log(ql_log_info, vha, 0x506d,
4402                    "This board is not MCTP capable\n");
4403                return rval;
4404        }
4405
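            /* The MCTP dump DMA buffer is allocated once and kept for reuse. */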
4406        if (!ha->mctp_dump) {
4407                ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
4408                    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
4409
4410                if (!ha->mctp_dump) {
4411                        ql_log(ql_log_warn, vha, 0x506e,
4412                            "Failed to allocate memory for mctp dump\n");
4413                        return rval;
4414                }
4415        }
4416
4417#define MCTP_DUMP_STR_ADDR      0x00000000
4418        rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
4419            MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
4420        if (rval != QLA_SUCCESS) {
4421                ql_log(ql_log_warn, vha, 0x506f,
4422                    "Failed to capture mctp dump\n");
4423        } else {
4424                ql_log(ql_log_info, vha, 0x5070,
4425                    "Mctp dump capture for host (%ld/%p).\n",
4426                    vha->host_no, ha->mctp_dump);
4427                ha->mctp_dumped = 1;
4428        }
4429
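            /*
             * Only function 0 attempts the NIC core firmware restart, and
             * only if a reset handler is not already active.
             */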
4430        if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
4431                ha->flags.nic_core_reset_hdlr_active = 1;
4432                rval = qla83xx_restart_nic_firmware(vha);
4433                if (rval)
4434                        /* NIC Core reset failed. */
4435                        ql_log(ql_log_warn, vha, 0x5071,
4436                            "Failed to restart nic firmware\n");
4437                else
4438                        ql_dbg(ql_dbg_p3p, vha, 0xb084,
4439                            "Restarted NIC firmware successfully.\n");
4440                ha->flags.nic_core_reset_hdlr_active = 0;
4441        }
4442
4443        return rval;
4445}
4446
4447/*
4448* qla2x00_quiesce_io
4449* Description: This function blocks new I/Os.
4450*              It does not abort any I/Os, as the context
4451*              is not destroyed during quiescence.
4452* Arguments: scsi_qla_host_t
4453* return   : void
4454*/
4455void
4456qla2x00_quiesce_io(scsi_qla_host_t *vha)
4457{
4458        struct qla_hw_data *ha = vha->hw;
4459        struct scsi_qla_host *vp;
4460
4461        ql_dbg(ql_dbg_dpc, vha, 0x401d,
4462            "Quiescing I/O - ha=%p.\n", ha);
4463
4464        atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
4465        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
4466                atomic_set(&vha->loop_state, LOOP_DOWN);
4467                qla2x00_mark_all_devices_lost(vha, 0);
4468                list_for_each_entry(vp, &ha->vp_list, list)
4469                        qla2x00_mark_all_devices_lost(vp, 0);
4470        } else {
4471                if (!atomic_read(&vha->loop_down_timer))
4472                        atomic_set(&vha->loop_down_timer,
4473                                        LOOP_DOWN_TIME);
4474        }
4475        /* Wait for pending cmds to complete */
4476        qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
4477}
4478
4479void
4480qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4481{
4482        struct qla_hw_data *ha = vha->hw;
4483        struct scsi_qla_host *vp;
4484        unsigned long flags;
4485        fc_port_t *fcport;
4486
4487        /* For ISP82XX, the driver waits for completion of the commands,
4488         * so the online flag should remain set.
4489         */
4490        if (!(IS_P3P_TYPE(ha)))
4491                vha->flags.online = 0;
4492        ha->flags.chip_reset_done = 0;
4493        clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4494        vha->qla_stats.total_isp_aborts++;
4495
4496        ql_log(ql_log_info, vha, 0x00af,
4497            "Performing ISP error recovery - ha=%p.\n", ha);
4498
4499        /* For ISP82XX, reset_chip only disables interrupts.  Since the
4500         * driver waits for the completion of the commands, the
4501         * interrupts need to remain enabled, so the chip reset is skipped.
4502         */
4503        if (!(IS_P3P_TYPE(ha)))
4504                ha->isp_ops->reset_chip(vha);
4505
4506        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
4507        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
4508                atomic_set(&vha->loop_state, LOOP_DOWN);
4509                qla2x00_mark_all_devices_lost(vha, 0);
4510
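                    /*
                     * Walk the vport list under vport_slock; hold a vref and
                     * drop the lock around the per-vport work so the vport
                     * cannot go away underneath us.
                     */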
4511                spin_lock_irqsave(&ha->vport_slock, flags);
4512                list_for_each_entry(vp, &ha->vp_list, list) {
4513                        atomic_inc(&vp->vref_count);
4514                        spin_unlock_irqrestore(&ha->vport_slock, flags);
4515
4516                        qla2x00_mark_all_devices_lost(vp, 0);
4517
4518                        spin_lock_irqsave(&ha->vport_slock, flags);
4519                        atomic_dec(&vp->vref_count);
4520                }
4521                spin_unlock_irqrestore(&ha->vport_slock, flags);
4522        } else {
4523                if (!atomic_read(&vha->loop_down_timer))
4524                        atomic_set(&vha->loop_down_timer,
4525                            LOOP_DOWN_TIME);
4526        }
4527
4528        /* Clear all async request states across all VPs. */
4529        list_for_each_entry(fcport, &vha->vp_fcports, list)
4530                fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4531        spin_lock_irqsave(&ha->vport_slock, flags);
4532        list_for_each_entry(vp, &ha->vp_list, list) {
4533                atomic_inc(&vp->vref_count);
4534                spin_unlock_irqrestore(&ha->vport_slock, flags);
4535
4536                list_for_each_entry(fcport, &vp->vp_fcports, list)
4537                        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4538
4539                spin_lock_irqsave(&ha->vport_slock, flags);
4540                atomic_dec(&vp->vref_count);
4541        }
4542        spin_unlock_irqrestore(&ha->vport_slock, flags);
4543
4544        if (!ha->flags.eeh_busy) {
4545                /* Make sure for ISP 82XX IO DMA is complete */
4546                if (IS_P3P_TYPE(ha)) {
4547                        qla82xx_chip_reset_cleanup(vha);
4548                        ql_log(ql_log_info, vha, 0x00b4,
4549                            "Done chip reset cleanup.\n");
4550
4551                        /* Done waiting for pending commands.
4552                         * Reset the online flag.
4553                         */
4554                        vha->flags.online = 0;
4555                }
4556
4557                /* Requeue all commands in outstanding command list. */
4558                qla2x00_abort_all_cmds(vha, DID_RESET << 16);
4559        }
4560}
4561
4562/*
4563*  qla2x00_abort_isp
4564*      Resets ISP and aborts all outstanding commands.
4565*
4566* Input:
4567*      ha           = adapter block pointer.
4568*
4569* Returns:
4570*      0 = success
4571*/
4572int
4573qla2x00_abort_isp(scsi_qla_host_t *vha)
4574{
4575        int rval;
4576        uint8_t        status = 0;
4577        struct qla_hw_data *ha = vha->hw;
4578        struct scsi_qla_host *vp;
4579        struct req_que *req = ha->req_q_map[0];
4580        unsigned long flags;
4581
4582        if (vha->flags.online) {
4583                qla2x00_abort_isp_cleanup(vha);
4584
4585                if (IS_QLA8031(ha)) {
4586                        ql_dbg(ql_dbg_p3p, vha, 0xb05c,
4587                            "Clearing fcoe driver presence.\n");
4588                        if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
4589                                ql_dbg(ql_dbg_p3p, vha, 0xb073,
4590                                    "Error while clearing DRV-Presence.\n");
4591                }
4592
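                    /*
                     * If the PCI channel is permanently gone there is nothing
                     * further to recover; clear the retry flag and bail out.
                     */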
4593                if (unlikely(pci_channel_offline(ha->pdev) &&
4594                    ha->flags.pci_channel_io_perm_failure)) {
4595                        clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4596                        status = 0;
4597                        return status;
4598                }
4599
4600                ha->isp_ops->get_flash_version(vha, req->ring);
4601
4602                ha->isp_ops->nvram_config(vha);
4603
4604                if (!qla2x00_restart_isp(vha)) {
4605                        clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4606
4607                        if (!atomic_read(&vha->loop_down_timer)) {
4608                                /*
4609                                 * Issue marker command only when we are going
4610                                 * to start the I/O.
4611                                 */
4612                                vha->marker_needed = 1;
4613                        }
4614
4615                        vha->flags.online = 1;
4616
4617                        ha->isp_ops->enable_intrs(ha);
4618
4619                        ha->isp_abort_cnt = 0;
4620                        clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4621
4622                        if (IS_QLA81XX(ha) || IS_QLA8031(ha))
4623                                qla2x00_get_fw_version(vha);
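                            /*
                             * Re-enable FCE and EFT tracing if their buffers
                             * were set up before the reset.
                             */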
4624                        if (ha->fce) {
4625                                ha->flags.fce_enabled = 1;
4626                                memset(ha->fce, 0,
4627                                    fce_calc_size(ha->fce_bufs));
4628                                rval = qla2x00_enable_fce_trace(vha,
4629                                    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
4630                                    &ha->fce_bufs);
4631                                if (rval) {
4632                                        ql_log(ql_log_warn, vha, 0x8033,
4633                                            "Unable to reinitialize FCE "
4634                                            "(%d).\n", rval);
4635                                        ha->flags.fce_enabled = 0;
4636                                }
4637                        }
4638
4639                        if (ha->eft) {
4640                                memset(ha->eft, 0, EFT_SIZE);
4641                                rval = qla2x00_enable_eft_trace(vha,
4642                                    ha->eft_dma, EFT_NUM_BUFFERS);
4643                                if (rval) {
4644                                        ql_log(ql_log_warn, vha, 0x8034,
4645                                            "Unable to reinitialize EFT "
4646                                            "(%d).\n", rval);
4647                                }
4648                        }
4649                } else {        /* failed the ISP abort */
4650                        vha->flags.online = 1;
4651                        if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
4652                                if (ha->isp_abort_cnt == 0) {
4653                                        ql_log(ql_log_fatal, vha, 0x8035,
4654                                            "ISP error recovery failed - "
4655                                            "board disabled.\n");
4656                                        /*
4657                                         * The next call disables the board
4658                                         * completely.
4659                                         */
4660                                        ha->isp_ops->reset_adapter(vha);
4661                                        vha->flags.online = 0;
4662                                        clear_bit(ISP_ABORT_RETRY,
4663                                            &vha->dpc_flags);
4664                                        status = 0;
4665                                } else { /* schedule another ISP abort */
4666                                        ha->isp_abort_cnt--;
4667                                        ql_dbg(ql_dbg_taskm, vha, 0x8020,
4668                                            "ISP abort - retry remaining %d.\n",
4669                                            ha->isp_abort_cnt);
4670                                        status = 1;
4671                                }
4672                        } else {
4673                                ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
4674                                ql_dbg(ql_dbg_taskm, vha, 0x8021,
4675                                    "ISP error recovery - retrying (%d) "
4676                                    "more times.\n", ha->isp_abort_cnt);
4677                                set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4678                                status = 1;
4679                        }
4680                }
4681
4682        }
4683
4684        if (!status) {
4685                ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
4686
4687                spin_lock_irqsave(&ha->vport_slock, flags);
4688                list_for_each_entry(vp, &ha->vp_list, list) {
4689                        if (vp->vp_idx) {
4690                                atomic_inc(&vp->vref_count);
4691                                spin_unlock_irqrestore(&ha->vport_slock, flags);
4692
4693                                qla2x00_vp_abort_isp(vp);
4694
4695                                spin_lock_irqsave(&ha->vport_slock, flags);
4696                                atomic_dec(&vp->vref_count);
4697                        }
4698                }
4699                spin_unlock_irqrestore(&ha->vport_slock, flags);
4700
4701                if (IS_QLA8031(ha)) {
4702                        ql_dbg(ql_dbg_p3p, vha, 0xb05d,
4703                            "Setting back fcoe driver presence.\n");
4704                        if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
4705                                ql_dbg(ql_dbg_p3p, vha, 0xb074,
4706                                    "Error while setting DRV-Presence.\n");
4707                }
4708        } else {
4709                ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
4710                       __func__);
4711        }
4712
4713        return(status);
4714}
4715
4716/*
4717*  qla2x00_restart_isp
4718*      restarts the ISP after a reset
4719*
4720* Input:
4721*      ha = adapter block pointer.
4722*
4723* Returns:
4724*      0 = success
4725*/
4726static int
4727qla2x00_restart_isp(scsi_qla_host_t *vha)
4728{
4729        int status = 0;
4730        uint32_t wait_time;
4731        struct qla_hw_data *ha = vha->hw;
4732        struct req_que *req = ha->req_q_map[0];
4733        struct rsp_que *rsp = ha->rsp_q_map[0];
4734        unsigned long flags;
4735
4736        /* If firmware needs to be loaded */
4737        if (qla2x00_isp_firmware(vha)) {
4738                vha->flags.online = 0;
4739                status = ha->isp_ops->chip_diag(vha);
4740                if (!status)
4741                        status = qla2x00_setup_chip(vha);
4742        }
4743
4744        if (!status && !(status = qla2x00_init_rings(vha))) {
4745                clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4746                ha->flags.chip_reset_done = 1;
4747                /* Initialize the queues in use */
4748                qla25xx_init_queues(ha);
4749
4750                status = qla2x00_fw_ready(vha);
4751                if (!status) {
4752                        ql_dbg(ql_dbg_taskm, vha, 0x8031,
4753                            "Start configure loop status = %d.\n", status);
4754
4755                        /* Issue a marker after FW becomes ready. */
4756                        qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4757
4758                        vha->flags.online = 1;
4759
4760                        /*
4761                         * Process any ATIO queue entries that came in
4762                         * while we weren't online.
4763                         */
4764                        spin_lock_irqsave(&ha->hardware_lock, flags);
4765                        if (qla_tgt_mode_enabled(vha))
4766                                qlt_24xx_process_atio_queue(vha);
4767                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
4768
4769                        /* Wait at most MAX_TARGET RSCNs for a stable link. */
4770                        wait_time = 256;
4771                        do {
4772                                clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4773                                qla2x00_configure_loop(vha);
4774                                wait_time--;
4775                        } while (!atomic_read(&vha->loop_down_timer) &&
4776                                !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4777                                && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
4778                                &vha->dpc_flags)));
4779                }
4780
4781                /* if no cable then assume it's good */
4782                if ((vha->device_flags & DFLG_NO_CABLE))
4783                        status = 0;
4784
4785                ql_dbg(ql_dbg_taskm, vha, 0x8032,
4786                    "Configure loop done, status = 0x%x.\n", status);
4787        }
4788        return (status);
4789}
4790
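    /*
     * Re-initialize the additional (non-base) request/response queues after
     * a chip reset; the base queues (index 0) are handled elsewhere.
     */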
4791static int
4792qla25xx_init_queues(struct qla_hw_data *ha)
4793{
4794        struct rsp_que *rsp = NULL;
4795        struct req_que *req = NULL;
4796        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4797        int ret = -1;
4798        int i;
4799
4800        for (i = 1; i < ha->max_rsp_queues; i++) {
4801                rsp = ha->rsp_q_map[i];
4802                if (rsp) {
4803                        rsp->options &= ~BIT_0;
4804                        ret = qla25xx_init_rsp_que(base_vha, rsp);
4805                        if (ret != QLA_SUCCESS)
4806                                ql_dbg(ql_dbg_init, base_vha, 0x00ff,
4807                                    "%s Rsp que: %d init failed.\n",
4808                                    __func__, rsp->id);
4809                        else
4810                                ql_dbg(ql_dbg_init, base_vha, 0x0100,
4811                                    "%s Rsp que: %d inited.\n",
4812                                    __func__, rsp->id);
4813                }
4814        }
4815        for (i = 1; i < ha->max_req_queues; i++) {
4816                req = ha->req_q_map[i];
4817                if (req) {
4818                        /* Clear outstanding commands array. */
4819                        req->options &= ~BIT_0;
4820                        ret = qla25xx_init_req_que(base_vha, req);
4821                        if (ret != QLA_SUCCESS)
4822                                ql_dbg(ql_dbg_init, base_vha, 0x0101,
4823                                    "%s Req que: %d init failed.\n",
4824                                    __func__, req->id);
4825                        else
4826                                ql_dbg(ql_dbg_init, base_vha, 0x0102,
4827                                    "%s Req que: %d inited.\n",
4828                                    __func__, req->id);
4829                }
4830        }
4831        return ret;
4832}
4833
4834/*
4835* qla2x00_reset_adapter
4836*      Reset adapter.
4837*
4838* Input:
4839*      ha = adapter block pointer.
4840*/
4841void
4842qla2x00_reset_adapter(scsi_qla_host_t *vha)
4843{
4844        unsigned long flags = 0;
4845        struct qla_hw_data *ha = vha->hw;
4846        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4847
4848        vha->flags.online = 0;
4849        ha->isp_ops->disable_intrs(ha);
4850
4851        spin_lock_irqsave(&ha->hardware_lock, flags);
4852        WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
4853        RD_REG_WORD(&reg->hccr);                        /* PCI Posting. */
4854        WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
4855        RD_REG_WORD(&reg->hccr);                        /* PCI Posting. */
4856        spin_unlock_irqrestore(&ha->hardware_lock, flags);
4857}
4858
4859void
4860qla24xx_reset_adapter(scsi_qla_host_t *vha)
4861{
4862        unsigned long flags = 0;
4863        struct qla_hw_data *ha = vha->hw;
4864        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4865
4866        if (IS_P3P_TYPE(ha))
4867                return;
4868
4869        vha->flags.online = 0;
4870        ha->isp_ops->disable_intrs(ha);
4871
4872        spin_lock_irqsave(&ha->hardware_lock, flags);
4873        WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
4874        RD_REG_DWORD(&reg->hccr);                       /* PCI Posting. */
4875        WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
4876        RD_REG_DWORD(&reg->hccr);                       /* PCI Posting. */
4877        spin_unlock_irqrestore(&ha->hardware_lock, flags);
4878
4879        if (IS_NOPOLLING_TYPE(ha))
4880                ha->isp_ops->enable_intrs(ha);
4881}
4882
4883/* On sparc systems, obtain port and node WWN from firmware
4884 * properties.
4885 */
4886static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
4887        struct nvram_24xx *nv)
4888{
4889#ifdef CONFIG_SPARC
4890        struct qla_hw_data *ha = vha->hw;
4891        struct pci_dev *pdev = ha->pdev;
4892        struct device_node *dp = pci_device_to_OF_node(pdev);
4893        const u8 *val;
4894        int len;
4895
4896        val = of_get_property(dp, "port-wwn", &len);
4897        if (val && len >= WWN_SIZE)
4898                memcpy(nv->port_name, val, WWN_SIZE);
4899
4900        val = of_get_property(dp, "node-wwn", &len);
4901        if (val && len >= WWN_SIZE)
4902                memcpy(nv->node_name, val, WWN_SIZE);
4903#endif
4904}
4905
4906int
4907qla24xx_nvram_config(scsi_qla_host_t *vha)
4908{
4909        int   rval;
4910        struct init_cb_24xx *icb;
4911        struct nvram_24xx *nv;
4912        uint32_t *dptr;
4913        uint8_t  *dptr1, *dptr2;
4914        uint32_t chksum;
4915        uint16_t cnt;
4916        struct qla_hw_data *ha = vha->hw;
4917
4918        rval = QLA_SUCCESS;
4919        icb = (struct init_cb_24xx *)ha->init_cb;
4920        nv = ha->nvram;
4921
4922        /* Determine NVRAM starting address. */
4923        if (ha->port_no == 0) {
4924                ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4925                ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4926        } else {
4927                ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
4928                ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4929        }
4930
4931        ha->nvram_size = sizeof(struct nvram_24xx);
4932        ha->vpd_size = FA_NVRAM_VPD_SIZE;
4933
4934        /* Get VPD data into cache */
4935        ha->vpd = ha->nvram + VPD_OFFSET;
4936        ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
4937            ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
4938
4939        /* Get NVRAM data into cache and calculate checksum. */
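            /* A valid NVRAM image checksums to zero across all 32-bit words. */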
4940        dptr = (uint32_t *)nv;
4941        ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
4942            ha->nvram_size);
4943        for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4944                chksum += le32_to_cpu(*dptr++);
4945
4946        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
4947            "Contents of NVRAM\n");
4948        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
4949            (uint8_t *)nv, ha->nvram_size);
4950
4951        /* Bad NVRAM data, set default parameters. */
4952        if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4953            || nv->id[3] != ' ' ||
4954            nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4955                /* Reset NVRAM data. */
4956                ql_log(ql_log_warn, vha, 0x006b,
4957                    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
4958                    "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
4959                ql_log(ql_log_warn, vha, 0x006c,
4960                    "Falling back to functioning (yet invalid -- WWPN) "
4961                    "defaults.\n");
4962
4963                /*
4964                 * Set default initialization control block.
4965                 */
4966                memset(nv, 0, ha->nvram_size);
4967                nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4968                nv->version = __constant_cpu_to_le16(ICB_VERSION);
4969                nv->frame_payload_size = __constant_cpu_to_le16(2048);
4970                nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4971                nv->exchange_count = __constant_cpu_to_le16(0);
4972                nv->hard_address = __constant_cpu_to_le16(124);
4973                nv->port_name[0] = 0x21;
4974                nv->port_name[1] = 0x00 + ha->port_no + 1;
4975                nv->port_name[2] = 0x00;
4976                nv->port_name[3] = 0xe0;
4977                nv->port_name[4] = 0x8b;
4978                nv->port_name[5] = 0x1c;
4979                nv->port_name[6] = 0x55;
4980                nv->port_name[7] = 0x86;
4981                nv->node_name[0] = 0x20;
4982                nv->node_name[1] = 0x00;
4983                nv->node_name[2] = 0x00;
4984                nv->node_name[3] = 0xe0;
4985                nv->node_name[4] = 0x8b;
4986                nv->node_name[5] = 0x1c;
4987                nv->node_name[6] = 0x55;
4988                nv->node_name[7] = 0x86;
4989                qla24xx_nvram_wwn_from_ofw(vha, nv);
4990                nv->login_retry_count = __constant_cpu_to_le16(8);
4991                nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4992                nv->login_timeout = __constant_cpu_to_le16(0);
4993                nv->firmware_options_1 =
4994                    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4995                nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4996                nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4997                nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4998                nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4999                nv->efi_parameters = __constant_cpu_to_le32(0);
5000                nv->reset_delay = 5;
5001                nv->max_luns_per_target = __constant_cpu_to_le16(128);
5002                nv->port_down_retry_count = __constant_cpu_to_le16(30);
5003                nv->link_down_timeout = __constant_cpu_to_le16(30);
5004
5005                rval = 1;
5006        }
5007
5008        if (!qla_ini_mode_enabled(vha)) {
5009                /* Don't enable full login after initial LIP */
5010                nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
5011                /* Don't enable LIP full login for initiator */
5012                nv->host_p &= __constant_cpu_to_le32(~BIT_10);
5013        }
5014
5015        qlt_24xx_config_nvram_stage1(vha, nv);
5016
5017        /* Reset Initialization control block */
5018        memset(icb, 0, ha->init_cb_size);
5019
5020        /* Copy 1st segment. */
5021        dptr1 = (uint8_t *)icb;
5022        dptr2 = (uint8_t *)&nv->version;
5023        cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
5024        while (cnt--)
5025                *dptr1++ = *dptr2++;
5026
5027        icb->login_retry_count = nv->login_retry_count;
5028        icb->link_down_on_nos = nv->link_down_on_nos;
5029
5030        /* Copy 2nd segment. */
5031        dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
5032        dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
5033        cnt = (uint8_t *)&icb->reserved_3 -
5034            (uint8_t *)&icb->interrupt_delay_timer;
5035        while (cnt--)
5036                *dptr1++ = *dptr2++;
5037
5038        /*
5039         * Setup driver NVRAM options.
5040         */
5041        qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
5042            "QLA2462");
5043
5044        qlt_24xx_config_nvram_stage2(vha, icb);
5045
5046        if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
5047                /* Use alternate WWN? */
5048                memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5049                memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5050        }
5051
5052        /* Prepare nodename */
5053        if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
5054                /*
5055                 * Firmware will apply the following mask if the nodename was
5056                 * not provided.
5057                 */
5058                memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5059                icb->node_name[0] &= 0xF0;
5060        }
5061
5062        /* Set host adapter parameters. */
5063        ha->flags.disable_risc_code_load = 0;
5064        ha->flags.enable_lip_reset = 0;
5065        ha->flags.enable_lip_full_login =
5066            le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
5067        ha->flags.enable_target_reset =
5068            le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
5069        ha->flags.enable_led_scheme = 0;
5070        ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
5071
5072        ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
5073            (BIT_6 | BIT_5 | BIT_4)) >> 4;
5074
5075        memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
5076            sizeof(ha->fw_seriallink_options24));
5077
5078        /* save HBA serial number */
5079        ha->serial0 = icb->port_name[5];
5080        ha->serial1 = icb->port_name[6];
5081        ha->serial2 = icb->port_name[7];
5082        memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5083        memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5084
5085        icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5086
5087        ha->retry_count = le16_to_cpu(nv->login_retry_count);
5088
5089        /* Set minimum login_timeout to 4 seconds. */
5090        if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
5091                nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
5092        if (le16_to_cpu(nv->login_timeout) < 4)
5093                nv->login_timeout = __constant_cpu_to_le16(4);
5094        ha->login_timeout = le16_to_cpu(nv->login_timeout);
5095        icb->login_timeout = nv->login_timeout;
5096
5097        /* Set minimum RATOV to 100 tenths of a second. */
5098        ha->r_a_tov = 100;
5099
5100        ha->loop_reset_delay = nv->reset_delay;
5101
5102        /* Link Down Timeout = 0:
5103         *
5104         *      When Port Down timer expires we will start returning
5105         *      I/O's to OS with "DID_NO_CONNECT".
5106         *
5107         * Link Down Timeout != 0:
5108         *
5109         *       The driver waits for the link to come up after link down
5110         *       before returning I/Os to OS with "DID_NO_CONNECT".
5111         */
5112        if (le16_to_cpu(nv->link_down_timeout) == 0) {
5113                ha->loop_down_abort_time =
5114                    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5115        } else {
5116                ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
5117                ha->loop_down_abort_time =
5118                    (LOOP_DOWN_TIME - ha->link_down_timeout);
5119        }
5120
5121        /* Need enough time to try and get the port back. */
5122        ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
5123        if (qlport_down_retry)
5124                ha->port_down_retry_count = qlport_down_retry;
5125
5126        /* Set login_retry_count */
5127        ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
5128        if (ha->port_down_retry_count ==
5129            le16_to_cpu(nv->port_down_retry_count) &&
5130            ha->port_down_retry_count > 3)
5131                ha->login_retry_count = ha->port_down_retry_count;
5132        else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5133                ha->login_retry_count = ha->port_down_retry_count;
5134        if (ql2xloginretrycount)
5135                ha->login_retry_count = ql2xloginretrycount;
5136
5137        /* Enable ZIO. */
5138        if (!vha->flags.init_done) {
5139                ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
5140                    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5141                ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
5142                    le16_to_cpu(icb->interrupt_delay_timer): 2;
5143        }
5144        icb->firmware_options_2 &= __constant_cpu_to_le32(
5145            ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
5146        vha->flags.process_response_queue = 0;
5147        if (ha->zio_mode != QLA_ZIO_DISABLED) {
5148                ha->zio_mode = QLA_ZIO_MODE_6;
5149
5150                ql_log(ql_log_info, vha, 0x006f,
5151                    "ZIO mode %d enabled; timer delay (%d us).\n",
5152                    ha->zio_mode, ha->zio_timer * 100);
5153
5154                icb->firmware_options_2 |= cpu_to_le32(
5155                    (uint32_t)ha->zio_mode);
5156                icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
5157                vha->flags.process_response_queue = 1;
5158        }
5159
5160        if (rval) {
5161                ql_log(ql_log_warn, vha, 0x0070,
5162                    "NVRAM configuration failed.\n");
5163        }
5164        return (rval);
5165}
5166
5167static int
5168qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
5169    uint32_t faddr)
5170{
5171        int     rval = QLA_SUCCESS;
5172        int     segments, fragment;
5173        uint32_t *dcode, dlen;
5174        uint32_t risc_addr;
5175        uint32_t risc_size;
5176        uint32_t i;
5177        struct qla_hw_data *ha = vha->hw;
5178        struct req_que *req = ha->req_q_map[0];
5179
5180        ql_dbg(ql_dbg_init, vha, 0x008b,
5181            "FW: Loading firmware from flash (%x).\n", faddr);
5182
5183        rval = QLA_SUCCESS;
5184
5185        segments = FA_RISC_CODE_SEGMENTS;
5186        dcode = (uint32_t *)req->ring;
5187        *srisc_addr = 0;
5188
5189        /* Validate firmware image by checking version. */
5190        qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
5191        for (i = 0; i < 4; i++)
5192                dcode[i] = be32_to_cpu(dcode[i]);
5193        if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
5194            dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
5195            (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
5196                dcode[3] == 0)) {
5197                ql_log(ql_log_fatal, vha, 0x008c,
5198                    "Unable to verify the integrity of flash firmware "
5199                    "image.\n");
5200                ql_log(ql_log_fatal, vha, 0x008d,
5201                    "Firmware data: %08x %08x %08x %08x.\n",
5202                    dcode[0], dcode[1], dcode[2], dcode[3]);
5203
5204                return QLA_FUNCTION_FAILED;
5205        }
5206
5207        while (segments && rval == QLA_SUCCESS) {
5208                /* Read segment's load information. */
5209                qla24xx_read_flash_data(vha, dcode, faddr, 4);
5210
5211                risc_addr = be32_to_cpu(dcode[2]);
5212                *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
5213                risc_size = be32_to_cpu(dcode[3]);
5214
5215                fragment = 0;
5216                while (risc_size > 0 && rval == QLA_SUCCESS) {
5217                        dlen = (uint32_t)(ha->fw_transfer_size >> 2);
5218                        if (dlen > risc_size)
5219                                dlen = risc_size;
5220
5221                        ql_dbg(ql_dbg_init, vha, 0x008e,
5222                            "Loading risc segment @ risc addr %x "
5223                            "number of dwords 0x%x offset 0x%x.\n",
5224                            risc_addr, dlen, faddr);
5225
5226                        qla24xx_read_flash_data(vha, dcode, faddr, dlen);
5227                        for (i = 0; i < dlen; i++)
5228                                dcode[i] = swab32(dcode[i]);
5229
5230                        rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5231                            dlen);
5232                        if (rval) {
5233                                ql_log(ql_log_fatal, vha, 0x008f,
5234                                    "Failed to load segment %d of firmware.\n",
5235                                    fragment);
5236                                break;
5237                        }
5238
5239                        faddr += dlen;
5240                        risc_addr += dlen;
5241                        risc_size -= dlen;
5242                        fragment++;
5243                }
5244
5245                /* Next segment. */
5246                segments--;
5247        }
5248
5249        if (!IS_QLA27XX(ha))
5250                return rval;
5251
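            /*
             * ISP27xx: a firmware dump template follows the firmware image in
             * flash; try to load it, falling back to the built-in default
             * template on any failure.
             */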
5252        if (ha->fw_dump_template)
5253                vfree(ha->fw_dump_template);
5254        ha->fw_dump_template = NULL;
5255        ha->fw_dump_template_len = 0;
5256
5257        ql_dbg(ql_dbg_init, vha, 0x0161,
5258            "Loading fwdump template from %x\n", faddr);
5259        qla24xx_read_flash_data(vha, dcode, faddr, 7);
5260        risc_size = be32_to_cpu(dcode[2]);
5261        ql_dbg(ql_dbg_init, vha, 0x0162,
5262            "-> array size %x dwords\n", risc_size);
5263        if (risc_size == 0 || risc_size == ~0)
5264                goto default_template;
5265
5266        dlen = (risc_size - 8) * sizeof(*dcode);
5267        ql_dbg(ql_dbg_init, vha, 0x0163,
5268            "-> template allocating %x bytes...\n", dlen);
5269        ha->fw_dump_template = vmalloc(dlen);
5270        if (!ha->fw_dump_template) {
5271                ql_log(ql_log_warn, vha, 0x0164,
5272                    "Failed fwdump template allocate %x bytes.\n", dlen);
5273                goto default_template;
5274        }
5275
5276        faddr += 7;
5277        risc_size -= 8;
5278        dcode = ha->fw_dump_template;
5279        qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
5280        for (i = 0; i < risc_size; i++)
5281                dcode[i] = le32_to_cpu(dcode[i]);
5282
5283        if (!qla27xx_fwdt_template_valid(dcode)) {
5284                ql_log(ql_log_warn, vha, 0x0165,
5285                    "Failed fwdump template validate\n");
5286                goto default_template;
5287        }
5288
5289        dlen = qla27xx_fwdt_template_size(dcode);
5290        ql_dbg(ql_dbg_init, vha, 0x0166,
5291            "-> template size %x bytes\n", dlen);
5292        if (dlen > risc_size * sizeof(*dcode)) {
5293                ql_log(ql_log_warn, vha, 0x0167,
5294                    "Failed fwdump template exceeds array by %x bytes\n",
5295                    (uint32_t)(dlen - risc_size * sizeof(*dcode)));
5296                goto default_template;
5297        }
5298        ha->fw_dump_template_len = dlen;
5299        return rval;
5300
5301default_template:
5302        ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
5303        if (ha->fw_dump_template)
5304                vfree(ha->fw_dump_template);
5305        ha->fw_dump_template = NULL;
5306        ha->fw_dump_template_len = 0;
5307
5308        dlen = qla27xx_fwdt_template_default_size();
5309        ql_dbg(ql_dbg_init, vha, 0x0169,
5310            "-> template allocating %x bytes...\n", dlen);
5311        ha->fw_dump_template = vmalloc(dlen);
5312        if (!ha->fw_dump_template) {
5313                ql_log(ql_log_warn, vha, 0x016a,
5314                    "Failed fwdump template allocate %x bytes.\n", dlen);
5315                goto failed_template;
5316        }
5317
5318        dcode = ha->fw_dump_template;
5319        risc_size = dlen / sizeof(*dcode);
5320        memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
5321        for (i = 0; i < risc_size; i++)
5322                dcode[i] = be32_to_cpu(dcode[i]);
5323
5324        if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
5325                ql_log(ql_log_warn, vha, 0x016b,
5326                    "Failed fwdump template validate\n");
5327                goto failed_template;
5328        }
5329
5330        dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
5331        ql_dbg(ql_dbg_init, vha, 0x016c,
5332            "-> template size %x bytes\n", dlen);
5333        ha->fw_dump_template_len = dlen;
5334        return rval;
5335
5336failed_template:
5337        ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
5338        if (ha->fw_dump_template)
5339                vfree(ha->fw_dump_template);
5340        ha->fw_dump_template = NULL;
5341        ha->fw_dump_template_len = 0;
5342        return rval;
5343}
5344
5345#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
5346
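    /*
     * Word-based (16-bit) firmware loader; the image is obtained through
     * the request_firmware interface (qla2x00_request_firmware).
     */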
5347int
5348qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5349{
5350        int     rval;
5351        int     i, fragment;
5352        uint16_t *wcode, *fwcode;
5353        uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
5354        struct fw_blob *blob;
5355        struct qla_hw_data *ha = vha->hw;
5356        struct req_que *req = ha->req_q_map[0];
5357
5358        /* Load firmware blob. */
5359        blob = qla2x00_request_firmware(vha);
5360        if (!blob) {
5361                ql_log(ql_log_info, vha, 0x0083,
5362                    "Firmware image unavailable.\n");
5363                ql_log(ql_log_info, vha, 0x0084,
5364                    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
5365                return QLA_FUNCTION_FAILED;
5366        }
5367
5368        rval = QLA_SUCCESS;
5369
5370        wcode = (uint16_t *)req->ring;
5371        *srisc_addr = 0;
5372        fwcode = (uint16_t *)blob->fw->data;
5373        fwclen = 0;
5374
5375        /* Validate firmware image by checking version. */
5376        if (blob->fw->size < 8 * sizeof(uint16_t)) {
5377                ql_log(ql_log_fatal, vha, 0x0085,
5378                    "Unable to verify integrity of firmware image (%Zd).\n",
5379                    blob->fw->size);
5380                goto fail_fw_integrity;
5381        }
5382        for (i = 0; i < 4; i++)
5383                wcode[i] = be16_to_cpu(fwcode[i + 4]);
5384        if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
5385            wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
5386                wcode[2] == 0 && wcode[3] == 0)) {
5387                ql_log(ql_log_fatal, vha, 0x0086,
5388                    "Unable to verify integrity of firmware image.\n");
5389                ql_log(ql_log_fatal, vha, 0x0087,
5390                    "Firmware data: %04x %04x %04x %04x.\n",
5391                    wcode[0], wcode[1], wcode[2], wcode[3]);
5392                goto fail_fw_integrity;
5393        }
5394
5395        seg = blob->segs;
5396        while (*seg && rval == QLA_SUCCESS) {
5397                risc_addr = *seg;
5398                *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
5399                risc_size = be16_to_cpu(fwcode[3]);
5400
5401                /* Validate firmware image size. */
5402                fwclen += risc_size * sizeof(uint16_t);
5403                if (blob->fw->size < fwclen) {
5404                        ql_log(ql_log_fatal, vha, 0x0088,
5405                            "Unable to verify integrity of firmware image "
5406                            "(%Zd).\n", blob->fw->size);
5407                        goto fail_fw_integrity;
5408                }
5409
5410                fragment = 0;
5411                while (risc_size > 0 && rval == QLA_SUCCESS) {
5412                        wlen = (uint16_t)(ha->fw_transfer_size >> 1);
5413                        if (wlen > risc_size)
5414                                wlen = risc_size;
5415                        ql_dbg(ql_dbg_init, vha, 0x0089,
5416                            "Loading risc segment @ risc addr %x number of "
5417                            "words 0x%x.\n", risc_addr, wlen);
5418
5419                        for (i = 0; i < wlen; i++)
5420                                wcode[i] = swab16(fwcode[i]);
5421
5422                        rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5423                            wlen);
5424                        if (rval) {
5425                    "Mctp dump captured for host (%ld/%p).\n",
5426                                    "Failed to load segment %d of firmware.\n",
5427                                    fragment);
5428                                break;
5429                        }
5430
5431                        fwcode += wlen;
5432                        risc_addr += wlen;
5433                        risc_size -= wlen;
5434                        fragment++;
5435                }
5436
5437                /* Next segment. */
5438                seg++;
5439        }
5440        return rval;
5441
5442fail_fw_integrity:
5443        return QLA_FUNCTION_FAILED;
5444}
5445
5446static int
5447qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5448{
5449        int     rval;
5450        int     segments, fragment;
5451        uint32_t *dcode, dlen;
5452        uint32_t risc_addr;
5453        uint32_t risc_size;
5454        uint32_t i;
5455        struct fw_blob *blob;
5456        const uint32_t *fwcode;
5457        uint32_t fwclen;
5458        struct qla_hw_data *ha = vha->hw;
5459        struct req_que *req = ha->req_q_map[0];
5460
5461        /* Load firmware blob. */
5462        blob = qla2x00_request_firmware(vha);
5463        if (!blob) {
5464                ql_log(ql_log_warn, vha, 0x0090,
5465                    "Firmware image unavailable.\n");
5466                ql_log(ql_log_warn, vha, 0x0091,
5467                    "Firmware images can be retrieved from: "
5468                    QLA_FW_URL ".\n");
5469
5470                return QLA_FUNCTION_FAILED;
5471        }
5472
5473        ql_dbg(ql_dbg_init, vha, 0x0092,
5474            "FW: Loading via request-firmware.\n");
5475
5476        rval = QLA_SUCCESS;
5477
5478        segments = FA_RISC_CODE_SEGMENTS;
5479        dcode = (uint32_t *)req->ring;
5480        *srisc_addr = 0;
5481        fwcode = (uint32_t *)blob->fw->data;
5482        fwclen = 0;
5483
5484        /* Validate firmware image by checking version. */
5485        if (blob->fw->size < 8 * sizeof(uint32_t)) {
5486                ql_log(ql_log_fatal, vha, 0x0093,
5487                    "Unable to verify integrity of firmware image (%Zd).\n",
5488                    blob->fw->size);
5489                return QLA_FUNCTION_FAILED;
5490        }
5491        for (i = 0; i < 4; i++)
5492                dcode[i] = be32_to_cpu(fwcode[i + 4]);
5493        if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
5494            dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
5495            (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
5496                dcode[3] == 0)) {
5497                ql_log(ql_log_fatal, vha, 0x0094,
5498                    "Unable to verify integrity of firmware image (%Zd).\n",
5499                    blob->fw->size);
5500                ql_log(ql_log_fatal, vha, 0x0095,
5501                    "Firmware data: %08x %08x %08x %08x.\n",
5502                    dcode[0], dcode[1], dcode[2], dcode[3]);
5503                return QLA_FUNCTION_FAILED;
5504        }
5505
5506        while (segments && rval == QLA_SUCCESS) {
5507                risc_addr = be32_to_cpu(fwcode[2]);
5508                *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
5509                risc_size = be32_to_cpu(fwcode[3]);
5510
5511                /* Validate firmware image size. */
5512                fwclen += risc_size * sizeof(uint32_t);
5513                if (blob->fw->size < fwclen) {
5514                        ql_log(ql_log_fatal, vha, 0x0096,
5515                            "Unable to verify integrity of firmware image "
5516                            "(%Zd).\n", blob->fw->size);
5517                        return QLA_FUNCTION_FAILED;
5518                }
5519
5520                fragment = 0;
5521                while (risc_size > 0 && rval == QLA_SUCCESS) {
5522                        dlen = (uint32_t)(ha->fw_transfer_size >> 2);
5523                        if (dlen > risc_size)
5524                                dlen = risc_size;
5525
5526                        ql_dbg(ql_dbg_init, vha, 0x0097,
5527                            "Loading risc segment @ risc addr %x "
5528                            "number of dwords 0x%x.\n", risc_addr, dlen);
5529
5530                        for (i = 0; i < dlen; i++)
5531                                dcode[i] = swab32(fwcode[i]);
5532
5533                        rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5534                            dlen);
5535                        if (rval) {
5536                                ql_log(ql_log_fatal, vha, 0x0098,
5537                                    "Failed to load segment %d of firmware.\n",
5538                                    fragment);
5539                                break;
5540                        }
5541
5542                        fwcode += dlen;
5543                        risc_addr += dlen;
5544                        risc_size -= dlen;
5545                        fragment++;
5546                }
5547
5548                /* Next segment. */
5549                segments--;
5550        }
5551
5552        if (!IS_QLA27XX(ha))
5553                return rval;
5554
5555        if (ha->fw_dump_template)
5556                vfree(ha->fw_dump_template);
5557        ha->fw_dump_template = NULL;
5558        ha->fw_dump_template_len = 0;
5559
5560        ql_dbg(ql_dbg_init, vha, 0x171,
5561            "Loading fwdump template from %x\n",
5562            (uint32_t)((void *)fwcode - (void *)blob->fw->data));
5563        risc_size = be32_to_cpu(fwcode[2]);
5564        ql_dbg(ql_dbg_init, vha, 0x172,
5565            "-> array size %x dwords\n", risc_size);
5566        if (risc_size == 0 || risc_size == ~0)
5567                goto default_template;
5568
5569        dlen = (risc_size - 8) * sizeof(*fwcode);
5570        ql_dbg(ql_dbg_init, vha, 0x0173,
5571            "-> template allocating %x bytes...\n", dlen);
5572        ha->fw_dump_template = vmalloc(dlen);
5573        if (!ha->fw_dump_template) {
5574                ql_log(ql_log_warn, vha, 0x0174,
5575                    "Failed fwdump template allocate %x bytes.\n", dlen);
5576                goto default_template;
5577        }
5578
5579        fwcode += 7;
5580        risc_size -= 8;
5581        dcode = ha->fw_dump_template;
5582        for (i = 0; i < risc_size; i++)
5583                dcode[i] = le32_to_cpu(fwcode[i]);
5584
5585        if (!qla27xx_fwdt_template_valid(dcode)) {
5586                ql_log(ql_log_warn, vha, 0x0175,
5587                    "Failed fwdump template validate\n");
5588                goto default_template;
5589        }
5590
5591        dlen = qla27xx_fwdt_template_size(dcode);
5592        ql_dbg(ql_dbg_init, vha, 0x0176,
5593            "-> template size %x bytes\n", dlen);
5594        if (dlen > risc_size * sizeof(*fwcode)) {
5595                ql_log(ql_log_warn, vha, 0x0177,
5596                    "Failed fwdump template exceeds array by %x bytes\n",
5597                    (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
5598                goto default_template;
5599        }
5600        ha->fw_dump_template_len = dlen;
5601        return rval;
5602
5603default_template:
5604        ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
5605        if (ha->fw_dump_template)
5606                vfree(ha->fw_dump_template);
5607        ha->fw_dump_template = NULL;
5608        ha->fw_dump_template_len = 0;
5609
5610        dlen = qla27xx_fwdt_template_default_size();
5611        ql_dbg(ql_dbg_init, vha, 0x0179,
5612            "-> template allocating %x bytes...\n", dlen);
5613        ha->fw_dump_template = vmalloc(dlen);
5614        if (!ha->fw_dump_template) {
5615                ql_log(ql_log_warn, vha, 0x017a,
5616                    "Failed fwdump template allocate %x bytes.\n", dlen);
5617                goto failed_template;
5618        }
5619
5620        dcode = ha->fw_dump_template;
5621        risc_size = dlen / sizeof(*fwcode);
5622        fwcode = qla27xx_fwdt_template_default();
5623        for (i = 0; i < risc_size; i++)
5624                dcode[i] = be32_to_cpu(fwcode[i]);
5625
5626        if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
5627                ql_log(ql_log_warn, vha, 0x017b,
5628                    "Failed fwdump template validate\n");
5629                goto failed_template;
5630        }
5631
5632        dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
5633        ql_dbg(ql_dbg_init, vha, 0x017c,
5634            "-> template size %x bytes\n", dlen);
5635        ha->fw_dump_template_len = dlen;
5636        return rval;
5637
5638failed_template:
5639        ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
5640        if (ha->fw_dump_template)
5641                vfree(ha->fw_dump_template);
5642        ha->fw_dump_template = NULL;
5643        ha->fw_dump_template_len = 0;
5644        return rval;
5645}
5646
5647int
5648qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5649{
5650        int rval;
5651
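            /*
             * ql2xfwloadbin == 1: use the flash-first load order implemented
             * by qla81xx_load_risc().
             */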
5652        if (ql2xfwloadbin == 1)
5653                return qla81xx_load_risc(vha, srisc_addr);
5654
5655        /*
5656         * FW Load priority:
5657         * 1) Firmware via request-firmware interface (.bin file).
5658         * 2) Firmware residing in flash.
5659         */
5660        rval = qla24xx_load_risc_blob(vha, srisc_addr);
5661        if (rval == QLA_SUCCESS)
5662                return rval;
5663
5664        return qla24xx_load_risc_flash(vha, srisc_addr,
5665            vha->hw->flt_region_fw);
5666}
5667
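/*
 * qla81xx_load_risc
 *      Flash-first firmware load: try the image in flash, then the
 *      request-firmware blob, and finally the golden firmware region for
 *      limited operation.  Setting ql2xfwloadbin == 2 skips straight to
 *      the blob.
 */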
5668int
5669qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5670{
5671        int rval;
5672        struct qla_hw_data *ha = vha->hw;
5673
5674        if (ql2xfwloadbin == 2)
5675                goto try_blob_fw;
5676
5677        /*
5678         * FW Load priority:
5679         * 1) Firmware residing in flash.
5680         * 2) Firmware via request-firmware interface (.bin file).
5681         * 3) Golden-Firmware residing in flash -- limited operation.
5682         */
5683        rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
5684        if (rval == QLA_SUCCESS)
5685                return rval;
5686
5687try_blob_fw:
5688        rval = qla24xx_load_risc_blob(vha, srisc_addr);
5689        if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
5690                return rval;
5691
5692        ql_log(ql_log_info, vha, 0x0099,
5693            "Attempting to fallback to golden firmware.\n");
5694        rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
5695        if (rval != QLA_SUCCESS)
5696                return rval;
5697
5698        ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
5699        ha->flags.running_gold_fw = 1;
5700        return rval;
5701}
5702
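/*
 * qla2x00_try_to_stop_firmware
 *      Best-effort stop-firmware for FWI-2 capable adapters.  If the
 *      command fails, the chip is reset and reinitialized and the command
 *      is retried, up to five times.
 */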
5703void
5704qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
5705{
5706        int ret, retries;
5707        struct qla_hw_data *ha = vha->hw;
5708
5709        if (ha->flags.pci_channel_io_perm_failure)
5710                return;
5711        if (!IS_FWI2_CAPABLE(ha))
5712                return;
5713        if (!ha->fw_major_version)
5714                return;
5715
5716        ret = qla2x00_stop_firmware(vha);
5717        for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
5718            ret != QLA_INVALID_COMMAND && retries ; retries--) {
5719                ha->isp_ops->reset_chip(vha);
5720                if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
5721                        continue;
5722                if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
5723                        continue;
5724                ql_log(ql_log_info, vha, 0x8015,
5725                    "Attempting retry of stop-firmware command.\n");
5726                ret = qla2x00_stop_firmware(vha);
5727        }
5728}
5729
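/*
 * qla24xx_configure_vhba
 *      Bring up an NPIV virtual port: wait for firmware ready on the
 *      physical (base) host, issue a marker, log in to the SNS and
 *      kick off a loop resync.  Valid only for vports (vp_idx != 0).
 */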
5730int
5731qla24xx_configure_vhba(scsi_qla_host_t *vha)
5732{
5733        int rval = QLA_SUCCESS;
5734        int rval2;
5735        uint16_t mb[MAILBOX_REGISTER_COUNT];
5736        struct qla_hw_data *ha = vha->hw;
5737        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
5738        struct req_que *req;
5739        struct rsp_que *rsp;
5740
5741        if (!vha->vp_idx)
5742                return -EINVAL;
5743
5744        rval = qla2x00_fw_ready(base_vha);
5745        if (ha->flags.cpu_affinity_enabled)
5746                req = ha->req_q_map[0];
5747        else
5748                req = vha->req;
5749        rsp = req->rsp;
5750
5751        if (rval == QLA_SUCCESS) {
5752                clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5753                qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5754        }
5755
5756        vha->flags.management_server_logged_in = 0;
5757
5758        /* Login to SNS first */
5759        rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
5760            BIT_1);
5761        if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
5762                if (rval2 == QLA_MEMORY_ALLOC_FAILED)
5763                        ql_dbg(ql_dbg_init, vha, 0x0120,
5764                            "Failed SNS login: loop_id=%x, rval2=%d\n",
5765                            NPH_SNS, rval2);
5766                else
5767                        ql_dbg(ql_dbg_init, vha, 0x0103,
5768                            "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
5769                            "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
5770                            NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
5771                return (QLA_FUNCTION_FAILED);
5772        }
5773
5774        atomic_set(&vha->loop_down_timer, 0);
5775        atomic_set(&vha->loop_state, LOOP_UP);
5776        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5777        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5778        rval = qla2x00_loop_resync(base_vha);
5779
5780        return rval;
5781}
5782
5783/* 84XX Support **************************************************************/
5784
5785static LIST_HEAD(qla_cs84xx_list);
5786static DEFINE_MUTEX(qla_cs84xx_mutex);
5787
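/*
 * qla84xx_get_chip
 *      Return the CS84xx state shared by all functions on the same PCI
 *      bus, taking a reference; allocate a new node if none exists yet.
 */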
5788static struct qla_chip_state_84xx *
5789qla84xx_get_chip(struct scsi_qla_host *vha)
5790{
5791        struct qla_chip_state_84xx *cs84xx;
5792        struct qla_hw_data *ha = vha->hw;
5793
5794        mutex_lock(&qla_cs84xx_mutex);
5795
5796        /* Find any shared 84xx chip. */
5797        list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
5798                if (cs84xx->bus == ha->pdev->bus) {
5799                        kref_get(&cs84xx->kref);
5800                        goto done;
5801                }
5802        }
5803
5804        cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
5805        if (!cs84xx)
5806                goto done;
5807
5808        kref_init(&cs84xx->kref);
5809        spin_lock_init(&cs84xx->access_lock);
5810        mutex_init(&cs84xx->fw_update_mutex);
5811        cs84xx->bus = ha->pdev->bus;
5812
5813        list_add_tail(&cs84xx->list, &qla_cs84xx_list);
5814done:
5815        mutex_unlock(&qla_cs84xx_mutex);
5816        return cs84xx;
5817}
5818
5819static void
5820__qla84xx_chip_release(struct kref *kref)
5821{
5822        struct qla_chip_state_84xx *cs84xx =
5823            container_of(kref, struct qla_chip_state_84xx, kref);
5824
5825        mutex_lock(&qla_cs84xx_mutex);
5826        list_del(&cs84xx->list);
5827        mutex_unlock(&qla_cs84xx_mutex);
5828        kfree(cs84xx);
5829}
5830
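/*
 * qla84xx_put_chip
 *      Drop this host's reference on the shared CS84xx state; the last
 *      put unlinks it from the global list and frees it.
 */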
5831void
5832qla84xx_put_chip(struct scsi_qla_host *vha)
5833{
5834        struct qla_hw_data *ha = vha->hw;
5835        if (ha->cs84xx)
5836                kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
5837}
5838
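/*
 * qla84xx_init_chip
 *      Verify the CS84xx firmware while holding fw_update_mutex.  A
 *      mailbox failure or a non-zero verify status is reported as
 *      QLA_FUNCTION_FAILED.
 */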
5839static int
5840qla84xx_init_chip(scsi_qla_host_t *vha)
5841{
5842        int rval;
5843        uint16_t status[2];
5844        struct qla_hw_data *ha = vha->hw;
5845
5846        mutex_lock(&ha->cs84xx->fw_update_mutex);
5847
5848        rval = qla84xx_verify_chip(vha, status);
5849
5850        mutex_unlock(&ha->cs84xx->fw_update_mutex);
5851
5852        return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
5853            QLA_SUCCESS;
5854}
5855
5856/* 81XX Support **************************************************************/
5857
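/*
 * qla81xx_nvram_config
 *      NVRAM configuration for ISP81xx and newer adapters: cache the VPD
 *      and NVRAM images from flash, validate the NVRAM checksum (seeding
 *      safe defaults if it is bad) and build the initialization control
 *      block and driver parameters from it.
 *
 * Returns:
 *      0 = success; non-zero when default parameters had to be used.
 */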
5858int
5859qla81xx_nvram_config(scsi_qla_host_t *vha)
5860{
5861        int   rval;
5862        struct init_cb_81xx *icb;
5863        struct nvram_81xx *nv;
5864        uint32_t *dptr;
5865        uint8_t  *dptr1, *dptr2;
5866        uint32_t chksum;
5867        uint16_t cnt;
5868        struct qla_hw_data *ha = vha->hw;
5869
5870        rval = QLA_SUCCESS;
5871        icb = (struct init_cb_81xx *)ha->init_cb;
5872        nv = ha->nvram;
5873
5874        /* Determine NVRAM starting address. */
5875        ha->nvram_size = sizeof(struct nvram_81xx);
5876        ha->vpd_size = FA_NVRAM_VPD_SIZE;
5877        if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
5878                ha->vpd_size = FA_VPD_SIZE_82XX;
5879
5880        /* Get VPD data into cache */
5881        ha->vpd = ha->nvram + VPD_OFFSET;
5882        ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
5883            ha->vpd_size);
5884
5885        /* Get NVRAM data into cache and calculate checksum. */
5886        ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
5887            ha->nvram_size);
5888        dptr = (uint32_t *)nv;
5889        for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
5890                chksum += le32_to_cpu(*dptr++);
5891
5892        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
5893            "Contents of NVRAM:\n");
5894        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
5895            (uint8_t *)nv, ha->nvram_size);
5896
5897        /* Bad NVRAM data, set defaults parameters. */
5898        if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5899            || nv->id[3] != ' ' ||
5900            nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5901                /* Reset NVRAM data. */
5902                ql_log(ql_log_info, vha, 0x0073,
5903                    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
5904                    "version=0x%x.\n", chksum, nv->id[0],
5905                    le16_to_cpu(nv->nvram_version));
5906                ql_log(ql_log_info, vha, 0x0074,
5907                    "Falling back to functioning (yet invalid -- WWPN) "
5908                    "defaults.\n");
5909
5910                /*
5911                 * Set default initialization control block.
5912                 */
5913                memset(nv, 0, ha->nvram_size);
5914                nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
5915                nv->version = __constant_cpu_to_le16(ICB_VERSION);
5916                nv->frame_payload_size = __constant_cpu_to_le16(2048);
5917                nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5918                nv->exchange_count = __constant_cpu_to_le16(0);
5919                nv->port_name[0] = 0x21;
5920                nv->port_name[1] = 0x00 + ha->port_no + 1;
5921                nv->port_name[2] = 0x00;
5922                nv->port_name[3] = 0xe0;
5923                nv->port_name[4] = 0x8b;
5924                nv->port_name[5] = 0x1c;
5925                nv->port_name[6] = 0x55;
5926                nv->port_name[7] = 0x86;
5927                nv->node_name[0] = 0x20;
5928                nv->node_name[1] = 0x00;
5929                nv->node_name[2] = 0x00;
5930                nv->node_name[3] = 0xe0;
5931                nv->node_name[4] = 0x8b;
5932                nv->node_name[5] = 0x1c;
5933                nv->node_name[6] = 0x55;
5934                nv->node_name[7] = 0x86;
5935                nv->login_retry_count = __constant_cpu_to_le16(8);
5936                nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
5937                nv->login_timeout = __constant_cpu_to_le16(0);
5938                nv->firmware_options_1 =
5939                    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
5940                nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
5941                nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
5942                nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
5943                nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
5944                nv->efi_parameters = __constant_cpu_to_le32(0);
5945                nv->reset_delay = 5;
5946                nv->max_luns_per_target = __constant_cpu_to_le16(128);
5947                nv->port_down_retry_count = __constant_cpu_to_le16(30);
5948                nv->link_down_timeout = __constant_cpu_to_le16(180);
5949                nv->enode_mac[0] = 0x00;
5950                nv->enode_mac[1] = 0xC0;
5951                nv->enode_mac[2] = 0xDD;
5952                nv->enode_mac[3] = 0x04;
5953                nv->enode_mac[4] = 0x05;
5954                nv->enode_mac[5] = 0x06 + ha->port_no + 1;
5955
5956                rval = 1;
5957        }
5958
5959        if (IS_T10_PI_CAPABLE(ha))
5960                nv->frame_payload_size &= ~7;
5961
5962        qlt_81xx_config_nvram_stage1(vha, nv);
5963
5964        /* Reset Initialization control block */
5965        memset(icb, 0, ha->init_cb_size);
5966
5967        /* Copy 1st segment. */
5968        dptr1 = (uint8_t *)icb;
5969        dptr2 = (uint8_t *)&nv->version;
5970        cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
5971        while (cnt--)
5972                *dptr1++ = *dptr2++;
5973
5974        icb->login_retry_count = nv->login_retry_count;
5975
5976        /* Copy 2nd segment. */
5977        dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
5978        dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
5979        cnt = (uint8_t *)&icb->reserved_5 -
5980            (uint8_t *)&icb->interrupt_delay_timer;
5981        while (cnt--)
5982                *dptr1++ = *dptr2++;
5983
5984        memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
5985        /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
5986        if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
5987                icb->enode_mac[0] = 0x00;
5988                icb->enode_mac[1] = 0xC0;
5989                icb->enode_mac[2] = 0xDD;
5990                icb->enode_mac[3] = 0x04;
5991                icb->enode_mac[4] = 0x05;
5992                icb->enode_mac[5] = 0x06 + ha->port_no + 1;
5993        }
5994
5995        /* Use extended-initialization control block. */
5996        memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
5997
5998        /*
5999         * Set up driver NVRAM options.
6000         */
6001        qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
6002            "QLE8XXX");
6003
6004        qlt_81xx_config_nvram_stage2(vha, icb);
6005
6006        /* Use alternate WWN? */
6007        if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
6008                memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
6009                memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
6010        }
6011
6012        /* Prepare nodename */
6013        if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
6014                /*
6015                 * Firmware will apply the following mask if the nodename was
6016                 * not provided.
6017                 */
6018                memcpy(icb->node_name, icb->port_name, WWN_SIZE);
6019                icb->node_name[0] &= 0xF0;
6020        }
6021
6022        /* Set host adapter parameters. */
6023        ha->flags.disable_risc_code_load = 0;
6024        ha->flags.enable_lip_reset = 0;
6025        ha->flags.enable_lip_full_login =
6026            le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
6027        ha->flags.enable_target_reset =
6028            le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
6029        ha->flags.enable_led_scheme = 0;
6030        ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
6031
6032        ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
6033            (BIT_6 | BIT_5 | BIT_4)) >> 4;
6034
6035        /* save HBA serial number */
6036        ha->serial0 = icb->port_name[5];
6037        ha->serial1 = icb->port_name[6];
6038        ha->serial2 = icb->port_name[7];
6039        memcpy(vha->node_name, icb->node_name, WWN_SIZE);
6040        memcpy(vha->port_name, icb->port_name, WWN_SIZE);
6041
6042        icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
6043
6044        ha->retry_count = le16_to_cpu(nv->login_retry_count);
6045
6046        /* Set minimum login_timeout to 4 seconds. */
6047        if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
6048                nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
6049        if (le16_to_cpu(nv->login_timeout) < 4)
6050                nv->login_timeout = __constant_cpu_to_le16(4);
6051        ha->login_timeout = le16_to_cpu(nv->login_timeout);
6052        icb->login_timeout = nv->login_timeout;
6053
6054        /* Set minimum RATOV to 100 tenths of a second. */
6055        ha->r_a_tov = 100;
6056
6057        ha->loop_reset_delay = nv->reset_delay;
6058
6059        /* Link Down Timeout = 0:
6060         *
6061         *      When the Port Down timer expires we will start returning
6062         *      I/Os to the OS with "DID_NO_CONNECT".
6063         *
6064         * Link Down Timeout != 0:
6065         *
6066         *      The driver waits for the link to come up after a link down
6067         *      before returning I/Os to the OS with "DID_NO_CONNECT".
6068         */
6069        if (le16_to_cpu(nv->link_down_timeout) == 0) {
6070                ha->loop_down_abort_time =
6071                    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
6072        } else {
6073                ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
6074                ha->loop_down_abort_time =
6075                    (LOOP_DOWN_TIME - ha->link_down_timeout);
6076        }
6077
6078        /* Need enough time to try and get the port back. */
6079        ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
6080        if (qlport_down_retry)
6081                ha->port_down_retry_count = qlport_down_retry;
6082
6083        /* Set login_retry_count */
6084        ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
6085        if (ha->port_down_retry_count ==
6086            le16_to_cpu(nv->port_down_retry_count) &&
6087            ha->port_down_retry_count > 3)
6088                ha->login_retry_count = ha->port_down_retry_count;
6089        else if (ha->port_down_retry_count > (int)ha->login_retry_count)
6090                ha->login_retry_count = ha->port_down_retry_count;
6091        if (ql2xloginretrycount)
6092                ha->login_retry_count = ql2xloginretrycount;
6093
6094        /* If not running MSI-X, we need handshaking on interrupts. */
6095        if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
6096                icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
6097
6098        /* Enable ZIO. */
6099        if (!vha->flags.init_done) {
6100                ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
6101                    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
6102                ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
6103                    le16_to_cpu(icb->interrupt_delay_timer) : 2;
6104        }
6105        icb->firmware_options_2 &= __constant_cpu_to_le32(
6106            ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
6107        vha->flags.process_response_queue = 0;
6108        if (ha->zio_mode != QLA_ZIO_DISABLED) {
6109                ha->zio_mode = QLA_ZIO_MODE_6;
6110
6111                ql_log(ql_log_info, vha, 0x0075,
6112                    "ZIO mode %d enabled; timer delay (%d us).\n",
6113                    ha->zio_mode,
6114                    ha->zio_timer * 100);
6115
6116                icb->firmware_options_2 |= cpu_to_le32(
6117                    (uint32_t)ha->zio_mode);
6118                icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
6119                vha->flags.process_response_queue = 1;
6120        }
6121
6122        if (rval) {
6123                ql_log(ql_log_warn, vha, 0x0076,
6124                    "NVRAM configuration failed.\n");
6125        }
6126        return (rval);
6127}
6128
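/*
 * qla82xx_restart_isp
 *      Bring an ISP82xx back up after a reset: reinitialize the rings,
 *      wait for firmware ready, reconfigure the loop, re-enable
 *      interrupts and the FCE/EFT traces, and run ISP-abort recovery on
 *      each virtual port.
 */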
6129int
6130qla82xx_restart_isp(scsi_qla_host_t *vha)
6131{
6132        int status, rval;
6133        uint32_t wait_time;
6134        struct qla_hw_data *ha = vha->hw;
6135        struct req_que *req = ha->req_q_map[0];
6136        struct rsp_que *rsp = ha->rsp_q_map[0];
6137        struct scsi_qla_host *vp;
6138        unsigned long flags;
6139
6140        status = qla2x00_init_rings(vha);
6141        if (!status) {
6142                clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6143                ha->flags.chip_reset_done = 1;
6144
6145                status = qla2x00_fw_ready(vha);
6146                if (!status) {
6147                        ql_log(ql_log_info, vha, 0x803c,
6148                            "Start configure loop, status =%d.\n", status);
6149
6150                        /* Issue a marker after FW becomes ready. */
6151                        qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
6152
6153                        vha->flags.online = 1;
6154                        /* Wait at most MAX_TARGET RSCNs for a stable link. */
6155                        wait_time = 256;
6156                        do {
6157                                clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6158                                qla2x00_configure_loop(vha);
6159                                wait_time--;
6160                        } while (!atomic_read(&vha->loop_down_timer) &&
6161                            !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
6162                            wait_time &&
6163                            (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
6164                }
6165
6166                /* if no cable then assume it's good */
6167                if ((vha->device_flags & DFLG_NO_CABLE))
6168                        status = 0;
6169
6170                ql_log(ql_log_info, vha, 0x8000,
6171                    "Configure loop done, status = 0x%x.\n", status);
6172        }
6173
6174        if (!status) {
6175                clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6176
6177                if (!atomic_read(&vha->loop_down_timer)) {
6178                        /*
6179                         * Issue marker command only when we are going
6180                         * to start the I/O.
6181                         */
6182                        vha->marker_needed = 1;
6183                }
6184
6185                vha->flags.online = 1;
6186
6187                ha->isp_ops->enable_intrs(ha);
6188
6189                ha->isp_abort_cnt = 0;
6190                clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6191
6192                /* Update the firmware version */
6193                status = qla82xx_check_md_needed(vha);
6194
6195                if (ha->fce) {
6196                        ha->flags.fce_enabled = 1;
6197                        memset(ha->fce, 0,
6198                            fce_calc_size(ha->fce_bufs));
6199                        rval = qla2x00_enable_fce_trace(vha,
6200                            ha->fce_dma, ha->fce_bufs, ha->fce_mb,
6201                            &ha->fce_bufs);
6202                        if (rval) {
6203                                ql_log(ql_log_warn, vha, 0x8001,
6204                                    "Unable to reinitialize FCE (%d).\n",
6205                                    rval);
6206                                ha->flags.fce_enabled = 0;
6207                        }
6208                }
6209
6210                if (ha->eft) {
6211                        memset(ha->eft, 0, EFT_SIZE);
6212                        rval = qla2x00_enable_eft_trace(vha,
6213                            ha->eft_dma, EFT_NUM_BUFFERS);
6214                        if (rval) {
6215                                ql_log(ql_log_warn, vha, 0x8010,
6216                                    "Unable to reinitialize EFT (%d).\n",
6217                                    rval);
6218                        }
6219                }
6220        }
6221
6222        if (!status) {
6223                ql_dbg(ql_dbg_taskm, vha, 0x8011,
6224                    "qla82xx_restart_isp succeeded.\n");
6225
6226                spin_lock_irqsave(&ha->vport_slock, flags);
6227                list_for_each_entry(vp, &ha->vp_list, list) {
6228                        if (vp->vp_idx) {
6229                                atomic_inc(&vp->vref_count);
6230                                spin_unlock_irqrestore(&ha->vport_slock, flags);
6231
6232                                qla2x00_vp_abort_isp(vp);
6233
6234                                spin_lock_irqsave(&ha->vport_slock, flags);
6235                                atomic_dec(&vp->vref_count);
6236                        }
6237                }
6238                spin_unlock_irqrestore(&ha->vport_slock, flags);
6239
6240        } else {
6241                ql_log(ql_log_warn, vha, 0x8016,
6242                    "qla82xx_restart_isp **** FAILED ****.\n");
6243        }
6244
6245        return status;
6246}
6247
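/*
 * qla81xx_update_fw_options
 *      When ql2xetsenable is set, enable the ETS burst option
 *      (fw_options[2] BIT_9) via qla2x00_set_fw_options().
 */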
6248void
6249qla81xx_update_fw_options(scsi_qla_host_t *vha)
6250{
6251        struct qla_hw_data *ha = vha->hw;
6252
6253        if (!ql2xetsenable)
6254                return;
6255
6256        /* Enable ETS Burst. */
6257        memset(ha->fw_options, 0, sizeof(ha->fw_options));
6258        ha->fw_options[2] |= BIT_9;
6259        qla2x00_set_fw_options(vha, ha->fw_options);
6260}
6261
6262/*
6263 * qla24xx_get_fcp_prio
6264 *      Gets the fcp cmd priority value for the logged in port.
6265 *      Looks for a match of the port descriptors within
6266 *      each of the fcp prio config entries. If a match is found,
6267 *      the tag (priority) value is returned.
6268 *
6269 * Input:
6270 *      vha = scsi host structure pointer.
6271 *      fcport = port structure pointer.
6272 *
6273 * Return:
6274 *      non-zero (if found)
6275 *      -1 (if not found)
6276 *
6277 * Context:
6278 *      Kernel context
6279 */
6280static int
6281qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
6282{
6283        int i, entries;
6284        uint8_t pid_match, wwn_match;
6285        int priority;
6286        uint32_t pid1, pid2;
6287        uint64_t wwn1, wwn2;
6288        struct qla_fcp_prio_entry *pri_entry;
6289        struct qla_hw_data *ha = vha->hw;
6290
6291        if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
6292                return -1;
6293
6294        priority = -1;
6295        entries = ha->fcp_prio_cfg->num_entries;
6296        pri_entry = &ha->fcp_prio_cfg->entry[0];
6297
6298        for (i = 0; i < entries; i++) {
6299                pid_match = wwn_match = 0;
6300
6301                if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
6302                        pri_entry++;
6303                        continue;
6304                }
6305
6306                /* check source pid for a match */
6307                if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
6308                        pid1 = pri_entry->src_pid & INVALID_PORT_ID;
6309                        pid2 = vha->d_id.b24 & INVALID_PORT_ID;
6310                        if (pid1 == INVALID_PORT_ID)
6311                                pid_match++;
6312                        else if (pid1 == pid2)
6313                                pid_match++;
6314                }
6315
6316                /* check destination pid for a match */
6317                if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
6318                        pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
6319                        pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
6320                        if (pid1 == INVALID_PORT_ID)
6321                                pid_match++;
6322                        else if (pid1 == pid2)
6323                                pid_match++;
6324                }
6325
6326                /* check source WWN for a match */
6327                if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
6328                        wwn1 = wwn_to_u64(vha->port_name);
6329                        wwn2 = wwn_to_u64(pri_entry->src_wwpn);
6330                        if (wwn2 == (uint64_t)-1)
6331                                wwn_match++;
6332                        else if (wwn1 == wwn2)
6333                                wwn_match++;
6334                }
6335
6336                /* check destination WWN for a match */
6337                if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
6338                        wwn1 = wwn_to_u64(fcport->port_name);
6339                        wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
6340                        if (wwn2 == (uint64_t)-1)
6341                                wwn_match++;
6342                        else if (wwn1 == wwn2)
6343                                wwn_match++;
6344                }
6345
6346                if (pid_match == 2 || wwn_match == 2) {
6347                        /* Found a matching entry */
6348                        if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
6349                                priority = pri_entry->tag;
6350                        break;
6351                }
6352
6353                pri_entry++;
6354        }
6355
6356        return priority;
6357}
6358
6359/*
6360 * qla24xx_update_fcport_fcp_prio
6361 *      Activates fcp priority for the logged in fc port
6362 *
6363 * Input:
6364 *      vha = scsi host structure pointer.
6365 *      fcp = port structure pointer.
6366 *
6367 * Return:
6368 *      QLA_SUCCESS or QLA_FUNCTION_FAILED
6369 *
6370 * Context:
6371 *      Kernel context.
6372 */
6373int
6374qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
6375{
6376        int ret;
6377        int priority;
6378        uint16_t mb[5];
6379
6380        if (fcport->port_type != FCT_TARGET ||
6381            fcport->loop_id == FC_NO_LOOP_ID)
6382                return QLA_FUNCTION_FAILED;
6383
6384        priority = qla24xx_get_fcp_prio(vha, fcport);
6385        if (priority < 0)
6386                return QLA_FUNCTION_FAILED;
6387
6388        if (IS_P3P_TYPE(vha->hw)) {
6389                fcport->fcp_prio = priority & 0xf;
6390                return QLA_SUCCESS;
6391        }
6392
6393        ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
6394        if (ret == QLA_SUCCESS) {
6395                if (fcport->fcp_prio != priority)
6396                        ql_dbg(ql_dbg_user, vha, 0x709e,
6397                            "Updated FCP_CMND priority - value=%d loop_id=%d "
6398                            "port_id=%02x%02x%02x.\n", priority,
6399                            fcport->loop_id, fcport->d_id.b.domain,
6400                            fcport->d_id.b.area, fcport->d_id.b.al_pa);
6401                fcport->fcp_prio = priority & 0xf;
6402        } else
6403                ql_dbg(ql_dbg_user, vha, 0x704f,
6404                    "Unable to update FCP_CMND priority - ret=0x%x for "
6405                    "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
6406                    fcport->d_id.b.domain, fcport->d_id.b.area,
6407                    fcport->d_id.b.al_pa);
6408        return  ret;
6409}
6410
6411/*
6412 * qla24xx_update_all_fcp_prio
6413 *      Activates fcp priority for all the logged in ports
6414 *
6415 * Input:
6416 *      ha = adapter block pointer.
6417 *
6418 * Return:
6419 *      QLA_SUCCESS or QLA_FUNCTION_FAILED
6420 *
6421 * Context:
6422 *      Kernel context.
6423 */
6424int
6425qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
6426{
6427        int ret;
6428        fc_port_t *fcport;
6429
6430        ret = QLA_FUNCTION_FAILED;
6431        /* We need to set priority for all logged in ports */
6432        list_for_each_entry(fcport, &vha->vp_fcports, list)
6433                ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
6434
6435        return ret;
6436}
6437