linux/drivers/scsi/qla2xxx/qla_mid.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

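/*
 * qla2x00_vp_stop_timer
 *	Stop the timer for a virtual port. The base port (vp_idx 0) is
 *	left untouched.
 */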
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

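/*
 * qla24xx_allocate_vp_id
 *	Claim a free vp_id from the index map, link the vport into
 *	ha->vp_list and publish it in the target-mode vp map.
 *
 *	Returns the allocated vp_id, or a value greater than
 *	max_npiv_vports if no slot is available.
 */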
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

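/*
 * qla24xx_deallocate_vp_id
 *	Wait up to ten seconds (500 x 20 ms) for outstanding references
 *	to the vport to drain, then unlink it from ha->vp_list and
 *	release its vp_id.
 */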
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	u32 i, bailout;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing the
	 * vport from the list. The lock must be held for safe removal
	 * (it ensures no vp_list traversal is active while the vport is
	 * unlinked).
	 */
	bailout = 0;
	for (i = 0; i < 500; i++) {
		spin_lock_irqsave(&ha->vport_slock, flags);
		if (atomic_read(&vha->vref_count) == 0) {
			list_del(&vha->list);
			qlt_update_vp_map(vha, RESET_VP_IDX);
			bailout = 1;
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (bailout)
			break;
		else
			msleep(20);
	}
	if (!bailout) {
		ql_log(ql_log_info, vha, 0xfffa,
			"vha->vref_count=%u timeout\n", vha->vref_count.counter);
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_del(&vha->list);
		qlt_update_vp_map(vha, RESET_VP_IDX);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	}

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

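/*
 * qla24xx_find_vhost_by_name
 *	Look up a vport on ha->vp_list by WWPN. Returns the matching
 *	host, or NULL if none is found. The returned pointer is not
 *	reference counted.
 */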
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual host adapter block pointer.
 *
 * Return:
 *	None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from contexts other than vport
	 * create, disable or delete, make sure it is synchronized with
	 * the vport delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

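/*
 * qla24xx_disable_vp
 *	Take a virtual port offline: log out its sessions, mark its
 *	fcports unconfigured and report the new state to the FC
 *	transport. Returns 0 on success, -1 on failure.
 */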
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret = QLA_SUCCESS;
	fc_port_t *fcport;

	if (vha->hw->flags.edif_enabled)
		/* delete sessions and flush sa_indexes */
		qla2x00_wait_for_sess_deletion(vha);

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	if (!vha->hw->flags.edif_enabled)
		qla2x00_wait_for_sess_deletion(vha);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

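/*
 * qla24xx_enable_vp
 *	Bring a virtual port online, provided the physical port is up
 *	and the topology is a fabric. Returns 0 on success, 1 on
 *	failure.
 */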
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

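/*
 * qla24xx_configure_vp
 *	Complete vport initialization: register for RSCN notifications
 *	(change request #3), configure the vhba and mark the vport
 *	active.
 */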
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003,
		    "Failed to enable receiving of RSCN requests: 0x%x.\n",
		    ret);
		return;
	}
	/* Corresponds to SCR enabled */
	clear_bit(VP_SCR_NEEDED, &vha->vp_flags);

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

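/*
 * qla2x00_alert_all_vps
 *	Fan an asynchronous event out to every vport. A vport reference
 *	is held across each callout so the vport cannot be deleted
 *	while the event is being processed.
 */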
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha, *tvp;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
		if (vha->vp_idx) {
			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
				continue;

			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

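/*
 * qla2x00_vp_abort_isp
 *	Vport-level ISP abort handling. The physical port performs the
 *	actual recovery; the vport is treated as a loop down and then
 *	re-enabled.
 */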
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	/*
	 * To exclusively reset the vport, we need to log it out first.
	 * Note: this control_vp can fail if an ISP reset has already
	 * been issued; that is expected, as the vport would already be
	 * logged out due to the ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->logout_on_delete = 0;
	}

	/*
	 * The physical port will do most of the abort and recovery
	 * work. We can just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);

	return qla24xx_enable_vp(vha);
}

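/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC work: complete deferred vport configuration,
 *	process queued PUREX IOCBs, update fcports, and handle relogin
 *	and loop-resync requests flagged in vha->dpc_flags.
 */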
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
		if (atomic_read(&vha->loop_state) == LOOP_READY) {
			qla24xx_process_purex_list(&vha->purex_list);
			clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

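/*
 * qla2x00_do_dpc_all_vps
 *	Run the per-vport DPC work for every vport on the adapter.
 *	Only meaningful on the base port in a fabric topology.
 */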
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp, *tvp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

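/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate a vport-create request from the FC transport: role,
 *	NPIV support in firmware and fabric, WWPN uniqueness and the
 *	vport count limit. Returns 0 or a VPCERR_* code.
 */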
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check that the WWPN is unique */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against the max NPIV vport count */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

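/*
 * qla24xx_create_vhost
 *	Allocate and initialize a scsi_qla_host for a new NPIV vport:
 *	assign a vp_id, inherit queue and NVMe settings from the base
 *	port and start the vport timer. Returns the new host, or NULL
 *	on failure.
 */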
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	vha->dpc_flags = 0L;
	ha->dpc_active = 0;
	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

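/*
 * qla25xx_free_req_que
 *	Release a request queue's DMA ring, clear its bit in the qid
 *	map and free the queue structure.
 */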
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
}

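/*
 * qla25xx_free_rsp_que
 *	Release a response queue: free its MSI-X vector if one was
 *	requested, then its DMA ring, qid map bit and structure.
 */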
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}

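/*
 * qla25xx_delete_req_que
 *	Ask the firmware to tear down a request queue (BIT_0 in the
 *	queue options requests deletion) and free its resources.
 */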
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

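/*
 * qla25xx_delete_rsp_que
 *	Ask the firmware to tear down a response queue and free its
 *	resources.
 */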
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}

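/*
 * qla25xx_create_req_que
 *	Allocate a request queue ring, claim a queue id, wire up the
 *	queue registers and optionally initialize the queue in
 *	firmware. Returns the new queue id, or 0 on failure.
 */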
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (uint16_t *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

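/* Process pending responses for a qpair from workqueue context. */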
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha = qpair->vha;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSI-X handshake mode on adapters that are not NACK-capable */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
		ha->flags.disable_msix_handshake ?
		QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}

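/* Completion callback for SRB_CTRL_VP; wakes the waiter in qla24xx_control_vp(). */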
static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
	if (sp->comp)
		complete(sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable or disable a virtual port for the given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent to enable/disable the virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	/* ref: INIT */
	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		return rval;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->comp = &comp;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla_ctrlvp_sp_done);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&comp);
	sp->comp = NULL;

	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}
done:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	return rval;
}