linux/drivers/scsi/qla2xxx/qla_nvme.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

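/*
 * This file binds the qla2xxx HBA to the kernel NVMe-FC transport:
 * the HBA is registered as an nvme_fc local port and each NVMe-capable
 * fc_port as a remote port, after which the transport drives LS and
 * FCP I/O through the qla_nvme_fc_transport ops defined below.
 */
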
static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

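/*
 * Register an fc_port with the NVMe-FC transport as a remote port.
 * Called once PRLI has reported NVMe target/discovery service
 * parameters for the port; returns 0 both on success and when
 * registration is skipped (transport disabled, already registered,
 * or no NVMe service advertised).
 */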
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
        struct nvme_rport *rport;
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return 0;

        if (fcport->nvme_flag & NVME_FLAG_REGISTERED)
                return 0;

        if (!vha->flags.nvme_enabled) {
                ql_log(ql_log_info, vha, 0x2100,
                    "%s: Not registering target since Host NVME is not enabled\n",
                    __func__);
                return 0;
        }

        if (!(fcport->nvme_prli_service_param &
            (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)))
                return 0;

        INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
        rport = kzalloc(sizeof(*rport), GFP_KERNEL);
        if (!rport) {
                ql_log(ql_log_warn, vha, 0x2101,
                    "%s: unable to alloc memory\n", __func__);
                return -ENOMEM;
        }

        rport->req.port_name = wwn_to_u64(fcport->port_name);
        rport->req.node_name = wwn_to_u64(fcport->node_name);
        rport->req.port_role = 0;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
                rport->req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
                rport->req.port_role |= FC_PORT_ROLE_NVME_TARGET;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
                rport->req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

        rport->req.port_id = fcport->d_id.b24;

        ql_log(ql_log_info, vha, 0x2102,
            "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
            __func__, rport->req.node_name, rport->req.port_name,
            rport->req.port_id);

        ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
            &fcport->nvme_remote_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0x212e,
                    "Failed to register remote port. Transport returned %d\n",
                    ret);
                /* Don't leak the rport allocated above. */
                kfree(rport);
                return ret;
        }

        fcport->nvme_remote_port->private = fcport;
        fcport->nvme_flag |= NVME_FLAG_REGISTERED;
        rport->fcport = fcport;
        list_add_tail(&rport->list, &vha->nvme_rport_list);
        return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha;
        struct qla_qpair *qpair;

        /* Queue index 0 (the admin queue) shares hardware queue 1. */
        if (!qidx)
                qidx++;

        vha = (struct scsi_qla_host *)lport->private;
        ha = vha->hw;

        ql_log(ql_log_info, vha, 0x2104,
            "%s: handle %p, idx=%d, qsize %d\n",
            __func__, handle, qidx, qsize);

        if (qidx > qla_nvme_fc_transport.max_hw_queues) {
                ql_log(ql_log_warn, vha, 0x212f,
                    "%s: Illegal qidx=%d. Max=%d\n",
                    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
                return -EINVAL;
        }

        if (ha->queue_pair_map[qidx]) {
                *handle = ha->queue_pair_map[qidx];
                ql_log(ql_log_info, vha, 0x2121,
                    "Returning existing qpair of %p for idx=%x\n",
                    *handle, qidx);
                return 0;
        }

        ql_log(ql_log_warn, vha, 0xffff,
            "allocating q for idx=%x w/o cpu mask\n", qidx);
        qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
        if (!qpair) {
                ql_log(ql_log_warn, vha, 0x2122,
                    "Failed to allocate qpair\n");
                return -EINVAL;
        }
        *handle = qpair;

        return 0;
}

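/*
 * SRB completion callbacks. Each outstanding SRB carries a reference
 * count; the command is only torn down when the last reference drops,
 * so the abort path and the normal completion path cannot both free
 * the same SRB.
 */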
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct srb_iocb *nvme;
        struct nvmefc_ls_req *fd;
        struct nvme_private *priv;

        if (atomic_read(&sp->ref_count) == 0) {
                ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
                    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
                return;
        }

        if (!atomic_dec_and_test(&sp->ref_count))
                return;

        if (res)
                res = -EINVAL;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;
        priv = fd->private;
        priv->comp_status = res;
        schedule_work(&priv->ls_work);
        /* work schedule doesn't need the sp */
        qla2x00_rel_sp(sp);
}

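/*
 * Complete an FCP request back to the transport. qla_nvme_sp_done()
 * below queues finished commands on the qpair's nvme_done_list; this
 * helper is invoked for each queued entry to call the transport's
 * ->done() and release the SRB.
 */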
void qla_nvme_cmpl_io(struct srb_iocb *nvme)
{
        srb_t *sp;
        struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;

        sp = container_of(nvme, srb_t, u.iocb_cmd);
        fd->done(fd);
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_sp_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct srb_iocb *nvme;
        struct nvmefc_fcp_req *fd;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;

        if (!atomic_dec_and_test(&sp->ref_count))
                return;

        if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
                goto rel;

        if (unlikely(res == QLA_FUNCTION_FAILED))
                fd->status = NVME_SC_INTERNAL;
        else
                fd->status = 0;

        fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
        list_add_tail(&nvme->u.nvme.entry, &sp->qpair->nvme_done_list);
        return;
rel:
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct nvme_private *priv = fd->private;
        fc_port_t *fcport = rport->private;
        srb_t *sp = priv->sp;
        int rval;
        struct qla_hw_data *ha = fcport->vha->hw;

        rval = ha->isp_ops->abort_command(sp);

        ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
            "%s: %s LS command for sp=%p on fcport=%p rval=%x\n", __func__,
            (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
            sp, fcport, rval);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
        struct nvme_private *priv =
            container_of(work, struct nvme_private, ls_work);
        struct nvmefc_ls_req *fd = priv->fd;

        fd->done(fd, priv->comp_status);
}

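/*
 * Issue an NVMe link-service (LS) request on behalf of the transport.
 * The LS payload is mapped for DMA and sent as an SRB_NVME_LS IOCB;
 * completion is reported back through qla_nvme_sp_ls_done() and the
 * ls_work worker.
 */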
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        fc_port_t *fcport = rport->private;
        struct srb_iocb *nvme;
        struct nvme_private *priv = fd->private;
        struct scsi_qla_host *vha;
        int rval = QLA_FUNCTION_FAILED;
        struct qla_hw_data *ha;
        srb_t *sp;

        if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return rval;

        vha = fcport->vha;
        ha = vha->hw;

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
        if (!sp)
                return rval;

        sp->type = SRB_NVME_LS;
        sp->name = "nvme_ls";
        sp->done = qla_nvme_sp_ls_done;
        atomic_set(&sp->ref_count, 1);
        nvme = &sp->u.iocb_cmd;
        priv->sp = sp;
        priv->fd = fd;
        INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
        nvme->u.nvme.desc = fd;
        nvme->u.nvme.dir = 0;
        nvme->u.nvme.dl = 0;
        nvme->u.nvme.cmd_len = fd->rqstlen;
        nvme->u.nvme.rsp_len = fd->rsplen;
        nvme->u.nvme.rsp_dma = fd->rspdma;
        nvme->u.nvme.timeout_sec = fd->timeout;
        nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
            fd->rqstlen, DMA_TO_DEVICE);
        /* Bail out rather than posting an unmapped command buffer. */
        if (dma_mapping_error(&ha->pdev->dev, nvme->u.nvme.cmd_dma)) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "dma mapping of LS request failed\n");
                qla2x00_rel_sp(sp);
                return rval;
        }
        dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
            fd->rqstlen, DMA_TO_DEVICE);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
                atomic_dec(&sp->ref_count);
                wake_up(&sp->nvme_ls_waitq);
                return rval;
        }

        return rval;
}

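/*
 * Abort an outstanding FCP command. The firmware abort is only
 * requested here; the aborted command still completes (with an error
 * status) through its normal sp->done path, which drops the final
 * SRB reference.
 */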
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        struct nvme_private *priv = fd->private;
        srb_t *sp = priv->sp;
        int rval;
        fc_port_t *fcport = rport->private;
        struct qla_hw_data *ha = fcport->vha->hw;

        rval = ha->isp_ops->abort_command(sp);

        ql_dbg(ql_dbg_io, fcport->vha, 0x2127,
            "%s: %s command for sp=%p on fcport=%p rval=%x\n", __func__,
            (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
            sp, fcport, rval);
}

static void qla_nvme_poll(struct nvme_fc_local_port *lport,
    void *hw_queue_handle)
{
        struct scsi_qla_host *vha = lport->private;
        unsigned long flags;
        struct qla_qpair *qpair = hw_queue_handle;

        /* Acquire ring specific lock */
        spin_lock_irqsave(&qpair->qp_lock, flags);
        qla24xx_process_response_queue(vha, qpair->rsp);
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

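/*
 * Build and ring an NVMe command IOCB on the qpair's request queue.
 * The command IOCB itself holds one data-segment descriptor (DSD);
 * scatter/gather lists with more segments spill into Continuation
 * Type 1 IOCBs of five DSDs each, which is what qla24xx_calc_iocbs()
 * accounts for in the ring-space check below.
 */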
static int qla2x00_start_nvme_mq(srb_t *sp)
{
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        struct cmd_nvme *cmd_pkt;
        uint16_t        cnt, i;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair = sp->qpair;
        struct srb_iocb *nvme = &sp->u.iocb_cmd;
        struct scatterlist *sgl, *sg;
        struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
        int             rval = QLA_SUCCESS;

        tot_dsds = fd->sg_cnt;

        /* Acquire qpair specific lock */
        spin_lock_irqsave(&qpair->qp_lock, flags);

        /* Setup qpair pointers */
        req = qpair->req;
        rsp = qpair->rsp;

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }

        if (index == req->num_outstanding_cmds) {
                rval = -1;
                goto queuing_error;
        }
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
                    RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length - (req->ring_index - cnt);

                if (req->cnt < (req_cnt + 2)) {
                        rval = -1;
                        goto queuing_error;
                }
        }

        if (unlikely(!fd->sqid)) {
                struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

                /* Track Async Event Requests posted on the admin queue. */
                if (cmd->sqe.common.opcode == nvme_admin_async_event) {
                        nvme->u.nvme.aen_op = 1;
                        atomic_inc(&vha->hw->nvme_active_aen_cnt);
                }
        }

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        cmd_pkt->entry_status = 0;

        /* Update entry type to indicate Command NVME IOCB */
        cmd_pkt->entry_type = COMMAND_NVME;

        /* TODO: no data transfer -- how do we check for buffer len == 0? */
        if (fd->io_dir == NVMEFC_FCP_READ) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
                vha->qla_stats.input_bytes += fd->payload_length;
                vha->qla_stats.input_requests++;
        } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
                vha->qla_stats.output_bytes += fd->payload_length;
                vha->qla_stats.output_requests++;
        } else if (fd->io_dir == 0) {
                cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
        }

        /* Set NPORT-ID */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* NVME RSP IU */
        cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
        cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
        cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

        /* NVME CMND IU */
        cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
        cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
        cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
        cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

        /* One DSD is available in the Command Type NVME IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
        sgl = fd->first_sgl;

        /* Load data segments */
        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */

                        /* Adjust ring index */
                        req->ring_index++;
                        if (req->ring_index == req->length) {
                                req->ring_index = 0;
                                req->ring_ptr = req->ring;
                        } else {
                                req->ring_ptr++;
                        }
                        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
                        *((uint32_t *)(&cont_pkt->entry_type)) =
                            cpu_to_le32(CONTINUE_A64_TYPE);

                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }

        /* Set total entry count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

queuing_error:
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
        return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        fc_port_t *fcport;
        struct srb_iocb *nvme;
        struct scsi_qla_host *vha;
        int rval = QLA_FUNCTION_FAILED;
        srb_t *sp;
        struct qla_qpair *qpair = hw_queue_handle;
        struct nvme_private *priv;

        if (!fd) {
                ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP request\n");
                return rval;
        }

        priv = fd->private;
        fcport = rport->private;
        if (!fcport) {
                ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
                return rval;
        }

        vha = fcport->vha;
        if (!qpair || !(fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return -EBUSY;

        /* Alloc SRB structure */
        sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
        if (!sp)
                return -EIO;

        atomic_set(&sp->ref_count, 1);
        init_waitqueue_head(&sp->nvme_ls_waitq);
        priv->sp = sp;
        sp->type = SRB_NVME_CMD;
        sp->name = "nvme_cmd";
        sp->done = qla_nvme_sp_done;
        sp->qpair = qpair;
        nvme = &sp->u.iocb_cmd;
        nvme->u.nvme.desc = fd;

        rval = qla2x00_start_nvme_mq(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x212d,
                    "qla2x00_start_nvme_mq failed = %d\n", rval);
                atomic_dec(&sp->ref_count);
                wake_up(&sp->nvme_ls_waitq);
                return -EIO;
        }

        return rval;
}

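/*
 * Transport unregister callbacks. nvme_fc_unregister_localport() and
 * nvme_fc_unregister_remoteport() complete asynchronously; these hooks
 * fire once the transport has finished tearing the port down, and
 * signal the waiting deleter through the *_del_done completions.
 */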
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
        struct scsi_qla_host *vha = lport->private;

        ql_log(ql_log_info, vha, 0x210f,
            "localport delete of %p completed.\n", vha->nvme_local_port);
        vha->nvme_local_port = NULL;
        complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
        fc_port_t *fcport;
        struct nvme_rport *r_port, *trport, *found = NULL;

        fcport = rport->private;
        fcport->nvme_remote_port = NULL;
        fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

        list_for_each_entry_safe(r_port, trport,
            &fcport->vha->nvme_rport_list, list) {
                if (r_port->fcport == fcport) {
                        list_del(&r_port->list);
                        found = r_port;
                        break;
                }
        }
        /* Only free an entry actually unlinked above; the loop cursor
         * is not a valid rport when no match was found. */
        kfree(found);
        complete(&fcport->nvme_del_done);

        ql_log(ql_log_info, fcport->vha, 0x2110,
            "remoteport_delete of %p completed.\n", fcport);
}

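/*
 * Ops and limits advertised to the NVMe-FC transport. max_hw_queues
 * and dma_boundary are adjusted to match the actual hardware at
 * registration time in qla_nvme_register_hba().
 */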
static struct nvme_fc_port_template qla_nvme_fc_transport = {
        .localport_delete = qla_nvme_localport_delete,
        .remoteport_delete = qla_nvme_remoteport_delete,
        .create_queue   = qla_nvme_alloc_queue,
        .delete_queue   = NULL,
        .ls_req         = qla_nvme_ls_req,
        .ls_abort       = qla_nvme_ls_abort,
        .fcp_io         = qla_nvme_post_cmd,
        .fcp_abort      = qla_nvme_fcp_abort,
        .poll_queue     = qla_nvme_poll,
        .max_hw_queues  = 8,
        .max_sgl_segments = 128,
        .max_dif_sgl_segments = 64,
        .dma_boundary = 0xFFFFFFFF,
        .local_priv_sz  = 8,
        .remote_priv_sz = 0,
        .lsrqst_priv_sz = sizeof(struct nvme_private),
        .fcprqst_priv_sz = sizeof(struct nvme_private),
};

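/*
 * After requesting an abort, give the firmware a bounded window to
 * finish with the command before declaring the wait failed.
 */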
#define NVME_ABORT_POLLING_PERIOD    2
static int qla_nvme_wait_on_command(srb_t *sp)
{
        int ret = QLA_SUCCESS;

        wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
            NVME_ABORT_POLLING_PERIOD * HZ);

        if (atomic_read(&sp->ref_count) > 1)
                ret = QLA_FUNCTION_FAILED;

        return ret;
}

static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
{
        int ret = QLA_SUCCESS;
        int timeout;

        timeout = wait_for_completion_timeout(&fcport->nvme_del_done,
            msecs_to_jiffies(2000));
        if (!timeout) {
                ret = QLA_FUNCTION_FAILED;
                ql_log(ql_log_info, fcport->vha, 0x2111,
                    "timed out waiting for fcport=%p to delete\n", fcport);
        }

        return ret;
}

void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp)
{
        int rval;

        rval = ha->isp_ops->abort_command(sp);
        if (!rval && !qla_nvme_wait_on_command(sp))
                ql_log(ql_log_warn, NULL, 0x2112,
                    "nvme_wait_on_command timed out waiting on sp=%p\n", sp);
}

static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
        struct fc_port *fcport = container_of(work, struct fc_port,
            nvme_del_work);
        struct nvme_rport *rport, *trport;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        ql_log(ql_log_warn, NULL, 0x2112,
            "%s: unregister remoteport on %p\n", __func__, fcport);

        list_for_each_entry_safe(rport, trport,
            &fcport->vha->nvme_rport_list, list) {
                if (rport->fcport == fcport) {
                        ql_log(ql_log_info, fcport->vha, 0x2113,
                            "%s: fcport=%p\n", __func__, fcport);
                        init_completion(&fcport->nvme_del_done);
                        nvme_fc_unregister_remoteport(
                            fcport->nvme_remote_port);
                        qla_nvme_wait_on_rport_del(fcport);
                }
        }
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
        struct nvme_rport *rport, *trport;
        fc_port_t *fcport;
        int nv_ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) {
                fcport = rport->fcport;

                ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
                    __func__, fcport);

                init_completion(&fcport->nvme_del_done);
                nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
                qla_nvme_wait_on_rport_del(fcport);
        }

        if (vha->nvme_local_port) {
                init_completion(&vha->nvme_del_done);
                nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
                if (nv_ret == 0)
                        ql_log(ql_log_info, vha, 0x2116,
                            "unregistered localport=%p\n",
                            vha->nvme_local_port);
                else
                        ql_log(ql_log_info, vha, 0x2115,
                            "Unregister of localport failed\n");
                wait_for_completion_timeout(&vha->nvme_del_done,
                    msecs_to_jiffies(5000));
        }
}

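/*
 * Register this HBA as an NVMe-FC local port. The first two request
 * queues are not offered to the transport, hence the
 * (ha->max_req_queues - 2) clamp on max_hw_queues below.
 */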
void qla_nvme_register_hba(struct scsi_qla_host *vha)
{
        struct nvme_fc_port_template *tmpl;
        struct qla_hw_data *ha;
        struct nvme_fc_port_info pinfo;
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        ha = vha->hw;
        tmpl = &qla_nvme_fc_transport;

        WARN_ON(vha->nvme_local_port);
        WARN_ON(ha->max_req_queues < 3);

        qla_nvme_fc_transport.max_hw_queues =
            min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
                (uint8_t)(ha->max_req_queues - 2));

        pinfo.node_name = wwn_to_u64(vha->node_name);
        pinfo.port_name = wwn_to_u64(vha->port_name);
        pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
        pinfo.port_id = vha->d_id.b24;

        ql_log(ql_log_info, vha, 0xffff,
            "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
            pinfo.node_name, pinfo.port_name, pinfo.port_id);
        qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

        ret = nvme_fc_register_localport(&pinfo, tmpl,
            get_device(&ha->pdev->dev), &vha->nvme_local_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0xffff,
                    "register_localport failed: ret=%x\n", ret);
                return;
        }
        vha->nvme_local_port->private = vha;
}