linux/drivers/scsi/qla2xxx/qla_nvme.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

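/*
 * Register @fcport as an NVMe remote port on @vha's local port once the
 * PRLI service parameters advertise a target or discovery role.  Returns 0
 * when registration is skipped (NVMe disabled, already registered, or the
 * port already online) and the transport's error code on failure.
 */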
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
        struct qla_nvme_rport *rport;
        struct nvme_fc_port_info req;
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return 0;

        if (!vha->flags.nvme_enabled) {
                ql_log(ql_log_info, vha, 0x2100,
                    "%s: Not registering target since Host NVME is not enabled\n",
                    __func__);
                return 0;
        }

        if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
                return 0;

        if (!(fcport->nvme_prli_service_param &
            (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
                (fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return 0;

        if (atomic_read(&fcport->state) == FCS_ONLINE)
                return 0;

        qla2x00_set_fcport_state(fcport, FCS_ONLINE);

        fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

        memset(&req, 0, sizeof(struct nvme_fc_port_info));
        req.port_name = wwn_to_u64(fcport->port_name);
        req.node_name = wwn_to_u64(fcport->node_name);
        req.port_role = 0;
        req.dev_loss_tmo = fcport->dev_loss_tmo;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
                req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
                req.port_role |= FC_PORT_ROLE_NVME_TARGET;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
                req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

        req.port_id = fcport->d_id.b24;

        ql_log(ql_log_info, vha, 0x2102,
            "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
            __func__, req.node_name, req.port_name,
            req.port_id);

        ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
            &fcport->nvme_remote_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0x212e,
                    "Failed to register remote port. Transport returned %d\n",
                    ret);
                return ret;
        }

        nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
                                       fcport->dev_loss_tmo);

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
                ql_log(ql_log_info, vha, 0x212a,
                       "PortID:%06x Supports SLER\n", req.port_id);

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
                ql_log(ql_log_info, vha, 0x212b,
                       "PortID:%06x Supports PI control\n", req.port_id);

        rport = fcport->nvme_remote_port->private;
        rport->fcport = fcport;

        fcport->nvme_flag |= NVME_FLAG_REGISTERED;
        return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha;
        struct qla_qpair *qpair;

        /* Map admin queue and 1st IO queue to index 0 */
        if (qidx)
                qidx--;

        vha = (struct scsi_qla_host *)lport->private;
        ha = vha->hw;

        ql_log(ql_log_info, vha, 0x2104,
            "%s: handle %p, idx =%d, qsize %d\n",
            __func__, handle, qidx, qsize);

        if (qidx > qla_nvme_fc_transport.max_hw_queues) {
                ql_log(ql_log_warn, vha, 0x212f,
                    "%s: Illegal qidx=%d. Max=%d\n",
                    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
                return -EINVAL;
        }

        /* Use base qpair if max_qpairs is 0 */
        if (!ha->max_qpairs) {
                qpair = ha->base_qpair;
        } else {
                if (ha->queue_pair_map[qidx]) {
                        *handle = ha->queue_pair_map[qidx];
                        ql_log(ql_log_info, vha, 0x2121,
                               "Returning existing qpair of %p for idx=%x\n",
                               *handle, qidx);
                        return 0;
                }

                qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
                if (!qpair) {
                        ql_log(ql_log_warn, vha, 0x2122,
                               "Failed to allocate qpair\n");
                        return -EINVAL;
                }
        }
        *handle = qpair;

        return 0;
}

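/*
 * Final kref release for an FCP command SRB: detach the nvme_private from
 * the SRB under cmd_lock, fill in the transport's completion status, call
 * fd->done(), and return the SRB to its qpair.
 */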
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
        struct srb *sp = container_of(kref, struct srb, cmd_kref);
        struct nvme_private *priv = (struct nvme_private *)sp->priv;
        struct nvmefc_fcp_req *fd;
        struct srb_iocb *nvme;
        unsigned long flags;

        if (!priv)
                goto out;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        priv->sp = NULL;
        sp->priv = NULL;
        if (priv->comp_status == QLA_SUCCESS) {
                fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
                fd->status = NVME_SC_SUCCESS;
        } else {
                fd->rcv_rsplen = 0;
                fd->transferred_length = 0;
                fd->status = NVME_SC_INTERNAL;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        fd->done(fd);
out:
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

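/* Undo the DMA mapping of an LS request buffer, if one is still live. */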
static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
{
        if (sp->flags & SRB_DMA_VALID) {
                struct srb_iocb *nvme = &sp->u.iocb_cmd;
                struct qla_hw_data *ha = sp->fcport->vha->hw;

                dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
                                 fd->rqstlen, DMA_TO_DEVICE);
                sp->flags &= ~SRB_DMA_VALID;
        }
}

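/*
 * Final kref release for an LS request SRB: detach the nvme_private, unmap
 * the request buffer, complete the LS request toward the transport, and
 * free the SRB.
 */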
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
        struct srb *sp = container_of(kref, struct srb, cmd_kref);
        struct nvme_private *priv = (struct nvme_private *)sp->priv;
        struct nvmefc_ls_req *fd;
        unsigned long flags;

        if (!priv)
                goto out;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        priv->sp = NULL;
        sp->priv = NULL;
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        fd = priv->fd;

        qla_nvme_ls_unmap(sp, fd);
        fd->done(fd, priv->comp_status);
out:
        qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, ls_work);

        kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

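/*
 * SRB completion callback for an LS request.  This may run in IRQ context,
 * so the final kref_put() is deferred to a workqueue via
 * qla_nvme_ls_complete().
 */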
static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
        struct nvme_private *priv = sp->priv;

        if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
                return;

        if (res)
                res = -EINVAL;

        priv->comp_status = res;
        INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
        schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
        struct nvme_private *priv = sp->priv;

        priv->comp_status = res;
        kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

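/*
 * Deferred abort handler.  Issues an ABTS for the SRB captured in
 * priv->sp; the matching kref_get() was taken before the work was queued
 * and is dropped here unless the abort path hands completion off to
 * qla24xx_abort_sp_done().
 */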
static void qla_nvme_abort_work(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, abort_work);
        srb_t *sp = priv->sp;
        fc_port_t *fcport = sp->fcport;
        struct qla_hw_data *ha = fcport->vha->hw;
        int rval, abts_done_called = 1;
        bool io_wait_for_abort_done;
        uint32_t handle;

        ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
               "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
               __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);

        if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
                goto out;

        if (ha->flags.host_shutting_down) {
                ql_log(ql_log_info, sp->fcport->vha, 0xffff,
                    "%s Calling done on sp: %p, type: 0x%x\n",
                    __func__, sp, sp->type);
                sp->done(sp, 0);
                goto out;
        }

        /*
         * sp may not be valid after abort_command() returns QLA_SUCCESS or
         * QLA_ERR_FROM_FW, so cache the values we need beforehand.
         */
        io_wait_for_abort_done = ql2xabts_wait_nvme &&
                                        QLA_ABTS_WAIT_ENABLED(sp);
        handle = sp->handle;

        rval = ha->isp_ops->abort_command(sp);

        ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
            "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
            __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
            sp, handle, fcport, rval);

        /*
         * If async tmf is enabled, the abort callback is called only on
         * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
         */
        if (ql2xasynctmfenable &&
            rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
                abts_done_called = 0;

        /*
         * Return before decreasing the kref so that I/O requests wait
         * until the ABTS completes.  That kref is dropped in
         * qla24xx_abort_sp_done().
         */
        if (abts_done_called && io_wait_for_abort_done)
                return;
out:
        /* kref_get() was done before the work was scheduled. */
        kref_put(&sp->cmd_kref, sp->put_fn);
}

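/*
 * Transport ->ls_abort() entry point: take a kref on the in-flight LS SRB
 * (if it still exists) and schedule qla_nvme_abort_work() to issue the ABTS.
 */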
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct nvme_private *priv = fd->private;
        unsigned long flags;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        if (!priv->sp) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }

        if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

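/*
 * Transport ->ls_req() entry point: build an SRB_NVME_LS SRB for the LS
 * payload, DMA-map the request buffer, and hand the IOCB to the firmware
 * via qla2x00_start_sp().
 */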
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct qla_nvme_rport *qla_rport = rport->private;
        fc_port_t *fcport = qla_rport->fcport;
        struct srb_iocb   *nvme;
        struct nvme_private *priv = fd->private;
        struct scsi_qla_host *vha;
        int     rval = QLA_FUNCTION_FAILED;
        struct qla_hw_data *ha;
        srb_t           *sp;

        if (!fcport || fcport->deleted)
                return rval;

        vha = fcport->vha;
        ha = vha->hw;

        if (!ha->flags.fw_started)
                return rval;

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
        if (!sp)
                return rval;

        sp->type = SRB_NVME_LS;
        sp->name = "nvme_ls";
        sp->done = qla_nvme_sp_ls_done;
        sp->put_fn = qla_nvme_release_ls_cmd_kref;
        sp->priv = priv;
        priv->sp = sp;
        kref_init(&sp->cmd_kref);
        spin_lock_init(&priv->cmd_lock);
        nvme = &sp->u.iocb_cmd;
        priv->fd = fd;
        nvme->u.nvme.desc = fd;
        nvme->u.nvme.dir = 0;
        nvme->u.nvme.dl = 0;
        nvme->u.nvme.cmd_len = fd->rqstlen;
        nvme->u.nvme.rsp_len = fd->rsplen;
        nvme->u.nvme.rsp_dma = fd->rspdma;
        nvme->u.nvme.timeout_sec = fd->timeout;
        nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
            fd->rqstlen, DMA_TO_DEVICE);
        if (dma_mapping_error(&ha->pdev->dev, nvme->u.nvme.cmd_dma)) {
                /* Mapping failed: release the SRB before SRB_DMA_VALID is set. */
                sp->priv = NULL;
                priv->sp = NULL;
                qla2x00_rel_sp(sp);
                return rval;
        }
        dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
            fd->rqstlen, DMA_TO_DEVICE);

        sp->flags |= SRB_DMA_VALID;

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
                wake_up(&sp->nvme_ls_waitq);
                sp->priv = NULL;
                priv->sp = NULL;
                qla_nvme_ls_unmap(sp, fd);
                qla2x00_rel_sp(sp);
                return rval;
        }

        return rval;
}

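/*
 * Transport ->fcp_abort() entry point: mirrors qla_nvme_ls_abort(), taking
 * a kref on the outstanding FCP SRB and deferring the ABTS to a workqueue.
 */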
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        struct nvme_private *priv = fd->private;
        unsigned long flags;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        if (!priv->sp) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

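/*
 * Build and ring a Command Type NVME IOCB on the SRB's qpair: reserve a
 * handle and ring space, fill the command packet (direction flags, first
 * burst, NPORT-ID, command/response IUs), chain Continuation Type 1 IOCBs
 * for any data segments beyond the first, then update the ring index.
 */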
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        handle;
        struct cmd_nvme *cmd_pkt;
        uint16_t        cnt, i;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair = sp->qpair;
        struct srb_iocb *nvme = &sp->u.iocb_cmd;
        struct scatterlist *sgl, *sg;
        struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
        struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
        uint32_t        rval = QLA_SUCCESS;

        /* Setup qpair pointers */
        req = qpair->req;
        rsp = qpair->rsp;
        tot_dsds = fd->sg_cnt;

        /* Acquire qpair specific lock */
        spin_lock_irqsave(&qpair->qp_lock, flags);

        handle = qla2xxx_get_next_handle(req);
        if (handle == 0) {
                rval = -EBUSY;
                goto queuing_error;
        }
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                if (IS_SHADOW_REG_CAPABLE(ha)) {
                        cnt = *req->out_ptr;
                } else {
                        cnt = rd_reg_dword_relaxed(req->req_q_out);
                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
                                goto queuing_error;
                }

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length - (req->ring_index - cnt);

                if (req->cnt < (req_cnt + 2)) {
                        rval = -EBUSY;
                        goto queuing_error;
                }
        }

        if (unlikely(!fd->sqid)) {
                if (cmd->sqe.common.opcode == nvme_admin_async_event) {
                        nvme->u.nvme.aen_op = 1;
                        atomic_inc(&ha->nvme_active_aen_cnt);
                }
        }

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
        cmd_pkt->handle = make_handle(req->id, handle);

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        cmd_pkt->entry_status = 0;

        /* Update entry type to indicate Command NVME IOCB */
        cmd_pkt->entry_type = COMMAND_NVME;

        /* Set transfer direction; io_dir == 0 means no data transfer. */
        if (fd->io_dir == NVMEFC_FCP_READ) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                qpair->counters.input_bytes += fd->payload_length;
                qpair->counters.input_requests++;
        } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                if ((vha->flags.nvme_first_burst) &&
                    (sp->fcport->nvme_prli_service_param &
                        NVME_PRLI_SP_FIRST_BURST)) {
                        if ((fd->payload_length <=
                            sp->fcport->nvme_first_burst_size) ||
                                (sp->fcport->nvme_first_burst_size == 0))
                                cmd_pkt->control_flags |=
                                        cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
                }
                qpair->counters.output_bytes += fd->payload_length;
                qpair->counters.output_requests++;
        } else if (fd->io_dir == 0) {
                cmd_pkt->control_flags = 0;
        }

        if (sp->fcport->edif.enable && fd->io_dir != 0)
                cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

        /* Set BIT_13 of control flags for Async event */
        if (vha->flags.nvme2_enabled &&
            cmd->sqe.common.opcode == nvme_admin_async_event) {
                cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
        }

        /* Set NPORT-ID */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* NVME RSP IU */
        cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
        put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

        /* NVME CMND IU */
        cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
        cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
        cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

        /* One DSD is available in the Command Type NVME IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->nvme_dsd;
        sgl = fd->first_sgl;

        /* Load data segments */
        for_each_sg(sgl, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */

                        /* Adjust ring index */
                        req->ring_index++;
                        if (req->ring_index == req->length) {
                                req->ring_index = 0;
                                req->ring_ptr = req->ring;
                        } else {
                                req->ring_ptr++;
                        }
                        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
                        put_unaligned_le32(CONTINUE_A64_TYPE,
                                           &cont_pkt->entry_type);

                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }

        /* Set total entry count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        /* Ignore NVMe async commands due to their long timeout. */
        if (!nvme->u.nvme.aen_op)
                sp->qpair->cmd_cnt++;

        /* Set chip new ring index. */
        wrt_reg_dword(req->req_q_in, req->ring_index);

        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

queuing_error:
        spin_unlock_irqrestore(&qpair->qp_lock, flags);

        return rval;
}

/* Post an FCP command to the firmware on the transport-selected qpair. */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        fc_port_t *fcport;
        struct srb_iocb *nvme;
        struct scsi_qla_host *vha;
        int rval;
        srb_t *sp;
        struct qla_qpair *qpair = hw_queue_handle;
        struct nvme_private *priv = fd->private;
        struct qla_nvme_rport *qla_rport = rport->private;

        if (!priv) {
                /* nvme association has been torn down */
                return -ENODEV;
        }

        fcport = qla_rport->fcport;

        if (unlikely(!qpair || !fcport || fcport->deleted))
                return -EBUSY;

        if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return -ENODEV;

        vha = fcport->vha;

        if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                return -EBUSY;

        /*
         * If we know the device is going away while the transport is still
         * sending I/Os, return busy to stall the I/O queue.  This happens
         * when the link goes away and the firmware hasn't notified us yet,
         * but I/Os are being returned.  If the device comes back quickly we
         * won't exhaust the I/O retry count at the core.
         */
        if (fcport->nvme_flag & NVME_FLAG_RESETTING)
                return -EBUSY;

        /* Alloc SRB structure */
        sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
        if (!sp)
                return -EBUSY;

        init_waitqueue_head(&sp->nvme_ls_waitq);
        kref_init(&sp->cmd_kref);
        spin_lock_init(&priv->cmd_lock);
        sp->priv = priv;
        priv->sp = sp;
        sp->type = SRB_NVME_CMD;
        sp->name = "nvme_cmd";
        sp->done = qla_nvme_sp_done;
        sp->put_fn = qla_nvme_release_fcp_cmd_kref;
        sp->qpair = qpair;
        sp->vha = vha;
        sp->cmd_sp = sp;
        nvme = &sp->u.iocb_cmd;
        nvme->u.nvme.desc = fd;

        rval = qla2x00_start_nvme_mq(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x212d,
                    "qla2x00_start_nvme_mq failed = %d\n", rval);
                wake_up(&sp->nvme_ls_waitq);
                sp->priv = NULL;
                priv->sp = NULL;
                qla2xxx_rel_qpair_sp(sp->qpair, sp);
        }

        return rval;
}

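/*
 * Transport ->map_queues() hook: map the blk-mq hardware contexts onto the
 * PCI MSI-X vectors so queue placement follows the adapter's IRQ affinity.
 */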
static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
                struct blk_mq_queue_map *map)
{
        struct scsi_qla_host *vha = lport->private;
        int rc;

        rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
        if (rc)
                ql_log(ql_log_warn, vha, 0x21de,
                       "pci map queue failed 0x%x\n", rc);
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
        struct scsi_qla_host *vha = lport->private;

        ql_log(ql_log_info, vha, 0x210f,
            "localport delete of %p completed.\n", vha->nvme_local_port);
        vha->nvme_local_port = NULL;
        complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
        fc_port_t *fcport;
        struct qla_nvme_rport *qla_rport = rport->private;

        fcport = qla_rport->fcport;
        fcport->nvme_remote_port = NULL;
        fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
        fcport->nvme_flag &= ~NVME_FLAG_DELETING;
        ql_log(ql_log_info, fcport->vha, 0x2110,
            "remoteport_delete of %p %8phN completed.\n",
            fcport, fcport->port_name);
        complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
        .localport_delete = qla_nvme_localport_delete,
        .remoteport_delete = qla_nvme_remoteport_delete,
        .create_queue   = qla_nvme_alloc_queue,
        .delete_queue   = NULL,
        .ls_req         = qla_nvme_ls_req,
        .ls_abort       = qla_nvme_ls_abort,
        .fcp_io         = qla_nvme_post_cmd,
        .fcp_abort      = qla_nvme_fcp_abort,
        .map_queues     = qla_nvme_map_queues,
        .max_hw_queues  = DEF_NVME_HW_QUEUES,
        .max_sgl_segments = 1024,
        .max_dif_sgl_segments = 64,
        .dma_boundary = 0xFFFFFFFF,
        .local_priv_sz  = 8,
        .remote_priv_sz = sizeof(struct qla_nvme_rport),
        .lsrqst_priv_sz = sizeof(struct nvme_private),
        .fcprqst_priv_sz = sizeof(struct nvme_private),
};

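/*
 * Tear down the FC-NVMe remote port for @fcport and block until the
 * transport's ->remoteport_delete() callback signals nvme_del_done.  When
 * the driver is being removed, dev_loss_tmo is forced to 0 first so the
 * transport drops the port immediately.
 */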
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        ql_log(ql_log_warn, fcport->vha, 0x2112,
            "%s: unregister remoteport on %p %8phN\n",
            __func__, fcport, fcport->port_name);

        if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
                nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

        init_completion(&fcport->nvme_del_done);
        ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
        if (ret)
                ql_log(ql_log_info, fcport->vha, 0x2114,
                    "%s: Failed to unregister nvme_remote_port (%d)\n",
                    __func__, ret);
        wait_for_completion(&fcport->nvme_del_done);
}

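/*
 * Unregister the host's FC-NVMe local port, if one exists, and wait for
 * the transport to complete the teardown.
 */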
void qla_nvme_delete(struct scsi_qla_host *vha)
{
        int nv_ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        if (vha->nvme_local_port) {
                init_completion(&vha->nvme_del_done);
                ql_log(ql_log_info, vha, 0x2116,
                    "unregister localport=%p\n",
                    vha->nvme_local_port);
                nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
                if (nv_ret)
                        ql_log(ql_log_info, vha, 0x2115,
                            "Unregister of localport failed\n");
                else
                        wait_for_completion(&vha->nvme_del_done);
        }
}

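/*
 * Register this host as an FC-NVMe local port.  Clamps ql2xnvme_queues to
 * the available qpairs, sizes the transport template's max_hw_queues, and
 * uses ha->vport_lock to avoid racing a concurrent registration.
 */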
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
        struct nvme_fc_port_template *tmpl;
        struct qla_hw_data *ha;
        struct nvme_fc_port_info pinfo;
        int ret = -EINVAL;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return ret;

        ha = vha->hw;
        tmpl = &qla_nvme_fc_transport;

        if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
                ql_log(ql_log_warn, vha, 0xfffd,
                    "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n",
                    ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
                ql2xnvme_queues = DEF_NVME_HW_QUEUES;
        } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
                ql_log(ql_log_warn, vha, 0xfffd,
                       "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
                       ql2xnvme_queues, (ha->max_qpairs - 1),
                       (ha->max_qpairs - 1));
                ql2xnvme_queues = ha->max_qpairs - 1;
        }

        qla_nvme_fc_transport.max_hw_queues =
            min((uint8_t)(ql2xnvme_queues),
                (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));

        ql_log(ql_log_info, vha, 0xfffb,
               "Number of NVME queues used for this port: %d\n",
               qla_nvme_fc_transport.max_hw_queues);

        pinfo.node_name = wwn_to_u64(vha->node_name);
        pinfo.port_name = wwn_to_u64(vha->port_name);
        pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
        pinfo.port_id = vha->d_id.b24;

        mutex_lock(&ha->vport_lock);
        /*
         * Check again for nvme_local_port to see if any other thread raced
         * with this one and finished registration.
         */
        if (!vha->nvme_local_port) {
                ql_log(ql_log_info, vha, 0xffff,
                    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
                    pinfo.node_name, pinfo.port_name, pinfo.port_id);
                qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

                ret = nvme_fc_register_localport(&pinfo, tmpl,
                                                 get_device(&ha->pdev->dev),
                                                 &vha->nvme_local_port);
                mutex_unlock(&ha->vport_lock);
        } else {
                mutex_unlock(&ha->vport_lock);
                return 0;
        }
        if (ret) {
                ql_log(ql_log_warn, vha, 0xffff,
                    "register_localport failed: ret=%x\n", ret);
        } else {
                vha->nvme_local_port->private = vha;
        }

        return ret;
}

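/*
 * When ABTS-wait is enabled for the original SRB, program driver-specified
 * retry and response-timeout options into the Abort IOCB before it is
 * issued to the firmware.
 */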
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
        struct qla_hw_data *ha;

        if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
                return;

        ha = orig_sp->fcport->vha->hw;

        WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
        /* Use Driver Specified Retry Count */
        abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
        abt->drv.abts_rty_cnt = cpu_to_le16(2);
        /* Use specified response timeout */
        abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
        /* Set it to 2 * r_a_tov in secs */
        abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

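/*
 * Log the completion status of an Abort IOCB issued for an NVMe command.
 * Purely informational: the caller decides how the aborted command is
 * completed.
 */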
void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
        u16     comp_status;
        struct scsi_qla_host *vha;

        if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
                return;

        vha = orig_sp->fcport->vha;

        comp_status = le16_to_cpu(abt->comp_status);
        switch (comp_status) {
        case CS_RESET:          /* reset event aborted */
        case CS_ABORTED:        /* IOCB was cleaned */
        case CS_TIMEOUT:
        /* N_Port handle is not currently logged in */
        case CS_PORT_UNAVAILABLE:
        /* N_Port handle was logged out while waiting for ABTS to complete */
        case CS_PORT_LOGGED_OUT:
        /* Firmware found that the port name changed */
        case CS_PORT_CONFIG_CHG:
                ql_dbg(ql_dbg_async, vha, 0xf09d,
                       "Abort I/O IOCB completed with error, comp_status=%x\n",
                       comp_status);
                break;

        /* BA_RJT was received for the ABTS */
        case CS_REJECT_RECEIVED:
                ql_dbg(ql_dbg_async, vha, 0xf09e,
                       "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
                       abt->fw.ba_rjt_vendorUnique);
                ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
                       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
                       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
                break;

        case CS_COMPLETE:
                ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
                       "IOCB request is completed successfully comp_status=%x\n",
                       comp_status);
                break;

        case CS_IOCB_ERROR:
                ql_dbg(ql_dbg_async, vha, 0xf0a0,
                       "IOCB request is failed, comp_status=%x\n", comp_status);
                break;

        default:
                ql_dbg(ql_dbg_async, vha, 0xf0a1,
                       "Invalid Abort IO IOCB Completion Status %x\n",
                       comp_status);
                break;
        }
}

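/*
 * Drop the extra kref held on an ABTS-wait-enabled NVMe SRB once the abort
 * has fully completed.
 */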
inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
        if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
                return;
        kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}