linux/drivers/scsi/lpfc/lpfc_nvme.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

/**
 * lpfc_nvme_create_queue - Allocate and bind a queue handle for an NVME queue
 * @pnvme_lport: Transport localport that the queue is being created on
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @qsize: Size of the queue in bytes
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
                       unsigned int qidx, u16 qsize,
                       void **handle)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_nvme_qhandle *qhandle;
        char *str;

        if (!pnvme_lport->private)
                return -ENOMEM;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;
        qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
        if (qhandle == NULL)
                return -ENOMEM;

        qhandle->cpu_id = raw_smp_processor_id();
        qhandle->qidx = qidx;
        /*
         * NVME qidx == 0 is the admin queue, so both admin queue
         * and first IO queue will use MSI-X vector and associated
         * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
         */
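        /*
         * Example (derived from the modulo below): with
         * lpfc_nvme_template.max_hw_queues == 4, IO queue qidx values
         * 1,2,3,4,5 bind to hdw_queue index 0,1,2,3,0.
         */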
        if (qidx) {
                str = "IO ";  /* IO queue */
                qhandle->index = ((qidx - 1) %
                        lpfc_nvme_template.max_hw_queues);
        } else {
                str = "ADM";  /* Admin queue */
                qhandle->index = qidx;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                         "6073 Binding %s HdwQueue %d  (cpu %d) to "
                         "hdw_queue %d qhandle x%px\n", str,
                         qidx, qhandle->cpu_id, qhandle->index, qhandle);
        *handle = (void *)qhandle;
        return 0;
}

/**
 * lpfc_nvme_delete_queue - Free a queue handle from lpfc_nvme_create_queue
 * @pnvme_lport: Transport localport that the queue handle is bound to
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures to bind the @qidx to its internal
 * IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
                       unsigned int qidx,
                       void *handle)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;

        if (!pnvme_lport->private)
                return;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                        "6001 ENTER.  lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
                        lport, qidx, handle);
        kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
        struct lpfc_nvme_lport *lport = localport->private;

        lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
                         "6173 localport x%px delete complete\n",
                         lport);

        /* release any threads waiting for the unreg to complete */
        if (lport->vport->localport)
                complete(lport->lport_unreg_cmp);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
        struct lpfc_nvme_rport *rport = remoteport->private;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        u32 fc4_xpt_flags;

        ndlp = rport->ndlp;
        if (!ndlp) {
                pr_err("**** %s: NULL ndlp on rport %p remoteport %p\n",
                       __func__, rport, remoteport);
                goto rport_err;
        }

        vport = ndlp->vport;
        if (!vport) {
                pr_err("**** %s: Null vport on ndlp %p, ste x%x rport %p\n",
                       __func__, ndlp, ndlp->nlp_state, rport);
                goto rport_err;
        }

        fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;

        /* Remove this rport from the lport's list - memory is owned by the
         * transport. Remove the ndlp reference for the NVME transport before
         * calling state machine to remove the node.
         */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                        "6146 remoteport delete of remoteport %p\n",
                        remoteport);
        spin_lock_irq(&ndlp->lock);

        /* The register rebind might have occurred before the delete
         * downcall.  Guard against this race.
         */
        if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)
                ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD);

        spin_unlock_irq(&ndlp->lock);

        /* On a devloss timeout event, one more put is executed provided the
         * NVME and SCSI rport unregister requests are complete.  If the vport
         * is unloading, this extra put is executed by lpfc_drop_node.
         */
        if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

 rport_err:
        return;
}

/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if LS was handled and delivered to the transport
 * Returns non-zero if the LS failed to be handled and should be dropped
 */
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
                        struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
        struct lpfc_vport *vport;
        struct lpfc_nvme_rport *lpfc_rport;
        struct nvme_fc_remote_port *remoteport;
        struct lpfc_nvme_lport *lport;
        uint32_t *payload = axchg->payload;
        int rc;

        vport = axchg->ndlp->vport;
        lpfc_rport = axchg->ndlp->nrport;
        if (!lpfc_rport)
                return -EINVAL;

        remoteport = lpfc_rport->remoteport;
        if (!vport->localport)
                return -EINVAL;

        lport = vport->localport->private;
        if (!lport)
                return -EINVAL;

        rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
                                axchg->size);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
                        "%08x %08x %08x\n",
                        axchg->size, rc,
                        *payload, *(payload+1), *(payload+2),
                        *(payload+3), *(payload+4), *(payload+5));

        if (!rc)
                return 0;
#endif
        return 1;
}

/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME
 *        LS request.
 * @phba: Pointer to HBA context object
 * @vport: The local port that issued the LS
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the generic completion handler for NVME LS requests.
 * The function updates any states and statistics, calls the transport
 * ls_req done() routine, then tears down the command and buffers used
 * for the LS request.
 **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
                        struct lpfc_iocbq *cmdwqe,
                        struct lpfc_wcqe_complete *wcqe)
{
        struct nvmefc_ls_req *pnvme_lsreq;
        struct lpfc_dmabuf *buf_ptr;
        struct lpfc_nodelist *ndlp;
        uint32_t status;

        pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
        ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6047 NVMEx LS REQ %px cmpl DID %x Xri: %x "
                         "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
                         "ndlp:x%px\n",
                         pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                         cmdwqe->sli4_xritag, status,
                         (wcqe->parameter & 0xffff),
                         cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

        lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
                         cmdwqe->sli4_xritag, status, wcqe->parameter);

        if (cmdwqe->context3) {
                buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
                lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                kfree(buf_ptr);
                cmdwqe->context3 = NULL;
        }
        if (pnvme_lsreq->done)
                pnvme_lsreq->done(pnvme_lsreq, status);
        else
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6046 NVMEx cmpl without done call back? "
                                 "Data %px DID %x Xri: %x status %x\n",
                                pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                                cmdwqe->sli4_xritag, status);
        if (ndlp) {
                lpfc_nlp_put(ndlp);
                cmdwqe->context1 = NULL;
        }
        lpfc_sli_release_iocbq(phba, cmdwqe);
}

static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                       struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_vport *vport = cmdwqe->vport;
        struct lpfc_nvme_lport *lport;
        uint32_t status;

        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

        if (vport->localport) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
                if (lport) {
                        atomic_inc(&lport->fc4NvmeLsCmpls);
                        if (status) {
                                if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                        atomic_inc(&lport->cmpl_ls_xb);
                                atomic_inc(&lport->cmpl_ls_err);
                        }
                }
        }

        __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
                  struct lpfc_dmabuf *inp,
                  struct nvmefc_ls_req *pnvme_lsreq,
                  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
                               struct lpfc_wcqe_complete *),
                  struct lpfc_nodelist *ndlp, uint32_t num_entry,
                  uint32_t tmo, uint8_t retry)
{
        struct lpfc_hba *phba = vport->phba;
        union lpfc_wqe128 *wqe;
        struct lpfc_iocbq *genwqe;
        struct ulp_bde64 *bpl;
        struct ulp_bde64 bde;
        int i, rc, xmit_len, first_len;

        /* Allocate buffer for command WQE */
        genwqe = lpfc_sli_get_iocbq(phba);
        if (genwqe == NULL)
                return 1;

        wqe = &genwqe->wqe;
        /* Initialize only 64 bytes */
        memset(wqe, 0, sizeof(union lpfc_wqe));

        genwqe->context3 = (uint8_t *)bmp;
        genwqe->iocb_flag |= LPFC_IO_NVME_LS;

        /* Save for completion so we can release these resources */
        genwqe->context1 = lpfc_nlp_get(ndlp);
        if (!genwqe->context1) {
                dev_warn(&phba->pcidev->dev,
                         "Warning: Failed node ref, not sending LS_REQ\n");
                lpfc_sli_release_iocbq(phba, genwqe);
                return 1;
        }

        genwqe->context2 = (uint8_t *)pnvme_lsreq;
        /* Fill in payload, bp points to frame payload */

        if (!tmo)
                /* FC spec states we need 3 * ratov for CT requests */
                tmo = (3 * phba->fc_ratov);

        /* For this command calculate the xmit length of the request bde. */
        xmit_len = 0;
        first_len = 0;
        bpl = (struct ulp_bde64 *)bmp->virt;
        for (i = 0; i < num_entry; i++) {
                bde.tus.w = bpl[i].tus.w;
                if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
                        break;
                xmit_len += bde.tus.f.bdeSize;
                if (i == 0)
                        first_len = xmit_len;
        }
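
        /* At this point first_len holds only bpl[0] (the LS request BDE),
         * while xmit_len has accumulated every 64-bit BDE.  Both values
         * are traced in the 6050 log message below.
         */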

        genwqe->rsvd2 = num_entry;
        genwqe->hba_wqidx = 0;

        /* Words 0 - 2 */
        wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
        wqe->generic.bde.tus.f.bdeSize = first_len;
        wqe->generic.bde.addrLow = bpl[0].addrLow;
        wqe->generic.bde.addrHigh = bpl[0].addrHigh;

        /* Word 3 */
        wqe->gen_req.request_payload_len = first_len;

        /* Word 4 */

        /* Word 5 */
        bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
        bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
        bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

        /* Word 7 */
        bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
        bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
        bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
        bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

        /* Word 8 */
        wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

        /* Word 10 */
        bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
        bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
        bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);


        /* Issue GEN REQ WQE for NPORT <did> */
        genwqe->wqe_cmpl = cmpl;
        genwqe->iocb_cmpl = NULL;
        genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
        genwqe->vport = vport;
        genwqe->retry = retry;

        lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
                         genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

        rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
        if (rc) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6045 Issue GEN REQ WQE to NPORT x%x "
                                 "Data: x%x x%x  rc x%x\n",
                                 ndlp->nlp_DID, genwqe->iotag,
                                 vport->port_state, rc);
                lpfc_nlp_put(ndlp);
                lpfc_sli_release_iocbq(phba, genwqe);
                return 1;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
                         "6050 Issue GEN REQ WQE to NPORT x%x "
                         "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
                         "bmp:x%px xmit:%d 1st:%d\n",
                         ndlp->nlp_DID, genwqe->sli4_xritag,
                         vport->port_state,
                         genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
        return 0;
}


/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS
 * @ndlp: The remote port to send the LS to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @gen_req_cmp: Completion call-back
 *
 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
 * WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                      struct nvmefc_ls_req *pnvme_lsreq,
                      void (*gen_req_cmp)(struct lpfc_hba *phba,
                                struct lpfc_iocbq *cmdwqe,
                                struct lpfc_wcqe_complete *wcqe))
{
        struct lpfc_dmabuf *bmp;
        struct ulp_bde64 *bpl;
        int ret;
        uint16_t ntype, nstate;

        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
                                 "LS Req\n",
                                 ndlp);
                return -ENODEV;
        }

        ntype = ndlp->nlp_type;
        nstate = ndlp->nlp_state;
        if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
            (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6088 NVMEx LS REQ: Fail DID x%06x not "
                                 "ready for IO. Type x%x, State x%x\n",
                                 ndlp->nlp_DID, ntype, nstate);
                return -ENODEV;
        }

        if (!vport->phba->sli4_hba.nvmels_wq)
                return -ENOMEM;

        /*
         * There are two DMA buffers in the request; in practice there is
         * one allocation and the second buffer is just the start address
         * plus the command size.
         * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
         * in a lpfc_dmabuf struct. When freeing we just free the wrapper
         * because the nvme layer owns the data buffers.
         * We do not have to break these packets open, we don't care what is
         * in them. And we do not have to look at the response data, we only
         * care that we got a response. All of the caring is going to happen
         * in the nvme-fc layer.
         */
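        /*
         * The BPL built below has two entries: bpl[0] points at
         * pnvme_lsreq->rqstdma for rqstlen bytes (the LS request) and
         * bpl[1] at pnvme_lsreq->rspdma for rsplen bytes (the LS response).
         */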

        bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
        if (!bmp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6044 NVMEx LS REQ: Could not alloc LS buf "
                                 "for DID %x\n",
                                 ndlp->nlp_DID);
                return -ENOMEM;
        }

        bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
        if (!bmp->virt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6042 NVMEx LS REQ: Could not alloc mbuf "
                                 "for DID %x\n",
                                 ndlp->nlp_DID);
                kfree(bmp);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&bmp->list);

        bpl = (struct ulp_bde64 *)bmp->virt;
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
        bpl->tus.f.bdeFlags = 0;
        bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
        bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                        "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
                        "rqstlen:%d rsplen:%d %pad %pad\n",
                        ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
                        pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
                        &pnvme_lsreq->rspdma);

        ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
                                pnvme_lsreq, gen_req_cmp, ndlp, 2,
                                pnvme_lsreq->timeout, 0);
        if (ret != WQE_SUCCESS) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
                                 "lsreq x%px Status %x DID %x\n",
                                 pnvme_lsreq, ret, ndlp->nlp_DID);
                lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
                kfree(bmp);
                return -EIO;
        }

        return 0;
}

/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
                 struct nvme_fc_remote_port *pnvme_rport,
                 struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_nvme_rport *rport;
        struct lpfc_vport *vport;
        int ret;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
        if (unlikely(!lport) || unlikely(!rport))
                return -EINVAL;

        vport = lport->vport;
        if (vport->load_flag & FC_UNLOADING)
                return -ENODEV;

        atomic_inc(&lport->fc4NvmeLsRequests);

        ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
                                 lpfc_nvme_ls_req_cmp);
        if (ret)
                atomic_inc(&lport->xmt_ls_err);

        return ret;
}

/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
 *         NVME LS request
 * @vport: The local port that issued the LS
 * @ndlp: The remote port the LS was sent to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 *
 * The driver validates the ndlp, looks for the LS, and aborts the
 * LS if found.
 *
 * Returns:
 * 0 : if LS found and aborted
 * non-zero: various error conditions in form -Exxx
 **/
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_sli_ring *pring;
        struct lpfc_iocbq *wqe, *next_wqe;
        bool foundit = false;

        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
                                "x%06x, Failing LS Req\n",
                                ndlp, ndlp ? ndlp->nlp_DID : 0);
                return -EINVAL;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
                         "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
                         "x%p rqstlen:%d rsplen:%d %pad %pad\n",
                         pnvme_lsreq, pnvme_lsreq->rqstlen,
                         pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
                         &pnvme_lsreq->rspdma);

        /*
         * Lock the ELS ring txcmplq and look for the wqe that matches
         * this ELS. If found, issue an abort on the wqe.
         */
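        /*
         * Lock order: phba->hbalock (outer), then pring->ring_lock (inner).
         * The abort itself is issued after the ring_lock is dropped but
         * while the hbalock is still held.
         */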
        pring = phba->sli4_hba.nvmels_wq->pring;
        spin_lock_irq(&phba->hbalock);
        spin_lock(&pring->ring_lock);
        list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
                if (wqe->context2 == pnvme_lsreq) {
                        wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
                        foundit = true;
                        break;
                }
        }
        spin_unlock(&pring->ring_lock);

        if (foundit)
                lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
        spin_unlock_irq(&phba->hbalock);

        if (foundit)
                return 0;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
                         "6213 NVMEx LS REQ Abort: Unable to locate req x%p\n",
                         pnvme_lsreq);
        return -EINVAL;
}

static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
                     struct nvme_fc_remote_port *remoteport,
                     struct nvmefc_ls_rsp *ls_rsp)
{
        struct lpfc_async_xchg_ctx *axchg =
                container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
        struct lpfc_nvme_lport *lport;
        int rc;

        if (axchg->phba->pport->load_flag & FC_UNLOADING)
                return -ENODEV;

        lport = (struct lpfc_nvme_lport *)localport->private;

        rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);

        if (rc) {
                /*
                 * unless the failure is due to having already sent
                 * the response, an abort will be generated for the
                 * exchange if the rsp can't be sent.
                 */
                if (rc != -EALREADY)
                        atomic_inc(&lport->xmt_ls_abort);
                return rc;
        }

        return 0;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
                   struct nvme_fc_remote_port *pnvme_rport,
                   struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        int ret;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        if (unlikely(!lport))
                return;
        vport = lport->vport;

        if (vport->load_flag & FC_UNLOADING)
                return;

        ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

        ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
        if (!ret)
                atomic_inc(&lport->xmt_ls_abort);
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
                       struct lpfc_io_buf *lpfc_ncmd,
                       struct nvmefc_fcp_req *nCmd)
{
        struct lpfc_hba  *phba = vport->phba;
        struct sli4_sge *sgl;
        union lpfc_wqe128 *wqe;
        uint32_t *wptr, *dptr;

        /*
         * Get a local pointer to the built-in wqe and correct
         * the cmd size to match NVME's 96 bytes and fix
         * the dma address.
         */

        wqe = &lpfc_ncmd->cur_iocbq.wqe;

        /*
         * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
         * match NVME.  NVME sends 96 bytes. Also, use the nvme
         * command's command and response dma addresses rather than
         * the virtual memory to ease the restore operation.
         */
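        /*
         * dma_sgl layout after this routine: sgl[0] describes the NVME
         * CMND IU and sgl[1] the FCP RSP IU; data SGEs, if any, follow
         * at sgl[2] (see lpfc_nvme_prep_io_dma).
         */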
        sgl = lpfc_ncmd->dma_sgl;
        sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
        if (phba->cfg_nvme_embed_cmd) {
                sgl->addr_hi = 0;
                sgl->addr_lo = 0;

                /* Word 0-2 - NVME CMND IU (embedded payload) */
                wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
                wqe->generic.bde.tus.f.bdeSize = 56;
                wqe->generic.bde.addrHigh = 0;
                wqe->generic.bde.addrLow =  64;  /* Word 16 */

                /* Word 10  - dbde is 0, wqes is 1 in template */

                /*
                 * Embed the payload in the last half of the WQE
                 * WQE words 16-30 get the NVME CMD IU payload
                 *
                 * WQE words 16-19 get payload Words 1-4
                 * WQE words 20-21 get payload Words 6-7
                 * WQE words 22-29 get payload Words 16-23
                 */
                wptr = &wqe->words[16];  /* WQE ptr */
                dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
                dptr++;                 /* Skip Word 0 in payload */

                *wptr++ = *dptr++;      /* Word 1 */
                *wptr++ = *dptr++;      /* Word 2 */
                *wptr++ = *dptr++;      /* Word 3 */
                *wptr++ = *dptr++;      /* Word 4 */
                dptr++;                 /* Skip Word 5 in payload */
                *wptr++ = *dptr++;      /* Word 6 */
                *wptr++ = *dptr++;      /* Word 7 */
                dptr += 8;              /* Skip Words 8-15 in payload */
                *wptr++ = *dptr++;      /* Word 16 */
                *wptr++ = *dptr++;      /* Word 17 */
                *wptr++ = *dptr++;      /* Word 18 */
                *wptr++ = *dptr++;      /* Word 19 */
                *wptr++ = *dptr++;      /* Word 20 */
                *wptr++ = *dptr++;      /* Word 21 */
                *wptr++ = *dptr++;      /* Word 22 */
                *wptr   = *dptr;        /* Word 23 */
        } else {
                sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
                sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

                /* Word 0-2 - NVME CMND IU Inline BDE */
                wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
                wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
                wqe->generic.bde.addrHigh = sgl->addr_hi;
                wqe->generic.bde.addrLow =  sgl->addr_lo;

                /* Word 10 */
                bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
                bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
        }

        sgl++;

        /* Setup the physical region for the FCP RSP */
        sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
        sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
        sgl->word2 = le32_to_cpu(sgl->word2);
        if (nCmd->sg_cnt)
                bf_set(lpfc_sli4_sge_last, sgl, 0);
        else
                bf_set(lpfc_sli4_sge_last, sgl, 1);
        sgl->word2 = cpu_to_le32(sgl->word2);
        sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}


/*
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 *
 * Driver registers this routine as the completion handler for NVME
 * FCP IO WQEs.  The routine validates the completed command, rebuilds
 * the NVME ERSP IU when the CQE carries one, updates statistics, and
 * calls the transport done() routine unless the exchange is still
 * busy with an outstanding abort.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_io_buf *lpfc_ncmd =
                (struct lpfc_io_buf *)pwqeIn->context1;
        struct lpfc_vport *vport = pwqeIn->vport;
        struct nvmefc_fcp_req *nCmd;
        struct nvme_fc_ersp_iu *ep;
        struct nvme_fc_cmd_iu *cp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        struct lpfc_nvme_lport *lport;
        uint32_t code, status, idx;
        uint16_t cid, sqhd, data;
        uint32_t *ptr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        int cpu;
#endif

        /* Sanity check on return of outstanding command */
        if (!lpfc_ncmd) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6071 Null lpfc_ncmd pointer. No "
                                 "release, skip completion\n");
                return;
        }

        /* Guard against abort handler being called at same time */
        spin_lock(&lpfc_ncmd->buf_lock);

        if (!lpfc_ncmd->nvmeCmd) {
                spin_unlock(&lpfc_ncmd->buf_lock);
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
                                 "nvmeCmd x%px\n",
                                 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

                /* Release the lpfc_ncmd regardless of the missing elements. */
                lpfc_release_nvme_buf(phba, lpfc_ncmd);
                return;
        }
        nCmd = lpfc_ncmd->nvmeCmd;
        status = bf_get(lpfc_wcqe_c_status, wcqe);

        idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
        phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

        if (unlikely(status && vport->localport)) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
                if (lport) {
                        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                atomic_inc(&lport->cmpl_fcp_xb);
                        atomic_inc(&lport->cmpl_fcp_err);
                }
        }

        lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                         status, wcqe->parameter);
        /*
         * Catch race where our node has transitioned, but the
         * transport is still transitioning.
         */
        ndlp = lpfc_ncmd->ndlp;
        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6062 Ignoring NVME cmpl.  No ndlp\n");
                goto out_err;
        }

        code = bf_get(lpfc_wcqe_c_code, wcqe);
        if (code == CQE_CODE_NVME_ERSP) {
                /* For this type of CQE, we need to rebuild the rsp */
                ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

                /*
                 * Get Command Id from cmd to plug into response. This
                 * code is not needed in the next NVME Transport drop.
                 */
                cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
                cid = cp->sqe.common.command_id;

                /*
                 * RSN is in CQE word 2
                 * SQHD is in CQE Word 3 bits 15:0
                 * Cmd Specific info is in CQE Word 1
                 * and in CQE Word 0 bits 15:0
                 */
                sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

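                /*
                 * Note: the ERSP iu_len is expressed in 32-bit words; the
                 * 8 words set below match the 32-byte LPFC_NVME_ERSP_LEN
                 * reported back to the transport.
                 */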
                /* Now let's build the NVME ERSP IU */
                ep->iu_len = cpu_to_be16(8);
                ep->rsn = wcqe->parameter;
                ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
                ep->rsvd12 = 0;
                ptr = (uint32_t *)&ep->cqe.result.u64;
                *ptr++ = wcqe->total_data_placed;
                data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
                *ptr = (uint32_t)data;
                ep->cqe.sq_head = sqhd;
                ep->cqe.sq_id =  nCmd->sqid;
                ep->cqe.command_id = cid;
                ep->cqe.status = 0;

                lpfc_ncmd->status = IOSTAT_SUCCESS;
                lpfc_ncmd->result = 0;
                nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
                nCmd->transferred_length = nCmd->payload_length;
        } else {
                lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
                lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

                /* For NVME, the only failure path that results in an
                 * IO error is when the adapter rejects it.  All other
                 * conditions are a success case and resolved by the
                 * transport.
                 * IOSTAT_FCP_RSP_ERROR means:
                 * 1. Length of data received doesn't match total
                 *    transfer length in WQE
                 * 2. If the RSP payload does NOT match these cases:
                 *    a. RSP length 12/24 bytes and all zeros
                 *    b. NVME ERSP
                 */
                switch (lpfc_ncmd->status) {
                case IOSTAT_SUCCESS:
                        nCmd->transferred_length = wcqe->total_data_placed;
                        nCmd->rcv_rsplen = 0;
                        nCmd->status = 0;
                        break;
                case IOSTAT_FCP_RSP_ERROR:
                        nCmd->transferred_length = wcqe->total_data_placed;
                        nCmd->rcv_rsplen = wcqe->parameter;
                        nCmd->status = 0;
                        /* Sanity check */
                        if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
                                break;
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "6081 NVME Completion Protocol Error: "
                                         "xri %x status x%x result x%x "
                                         "placed x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         lpfc_ncmd->status, lpfc_ncmd->result,
                                         wcqe->total_data_placed);
                        break;
                case IOSTAT_LOCAL_REJECT:
                        /* Let fall through to set command final state. */
                        if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
                                lpfc_printf_vlog(vport, KERN_INFO,
                                         LOG_NVME_IOERR,
                                         "6032 Delay Aborted cmd x%px "
                                         "nvme cmd x%px, xri x%x, "
                                         "xb %d\n",
                                         lpfc_ncmd, nCmd,
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         bf_get(lpfc_wcqe_c_xb, wcqe));
                        fallthrough;
                default:
out_err:
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                                         "6072 NVME Completion Error: xri %x "
                                         "status x%x result x%x [x%x] "
                                         "placed x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         lpfc_ncmd->status, lpfc_ncmd->result,
                                         wcqe->parameter,
                                         wcqe->total_data_placed);
                        nCmd->transferred_length = 0;
                        nCmd->rcv_rsplen = 0;
                        nCmd->status = NVME_SC_INTERNAL;
                }
        }

        /* pick up SLI4 exchange busy condition */
        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
        else
                lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

        /* Update stats and complete the IO.  There is
         * no need for dma unprep because the nvme_transport
         * owns the dma address.
         */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (lpfc_ncmd->ts_cmd_start) {
                lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
                lpfc_ncmd->ts_data_io = ktime_get_ns();
                phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
                lpfc_io_ktime(phba, lpfc_ncmd);
        }
        if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
                cpu = raw_smp_processor_id();
                this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
                if (lpfc_ncmd->cpu != cpu)
                        lpfc_printf_vlog(vport,
                                         KERN_INFO, LOG_NVME_IOERR,
                                         "6701 CPU Check cmpl: "
                                         "cpu %d expect %d\n",
                                         cpu, lpfc_ncmd->cpu);
        }
#endif

        /* NVME targets need completion held off until the abort exchange
         * completes unless the NVME Rport is getting unregistered.
         */

        if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
                freqpriv = nCmd->private;
                freqpriv->nvme_buf = NULL;
                lpfc_ncmd->nvmeCmd = NULL;
                spin_unlock(&lpfc_ncmd->buf_lock);
                nCmd->done(nCmd);
        } else
                spin_unlock(&lpfc_ncmd->buf_lock);

        /* Call release with XB=1 to queue the IO into the abort list. */
        lpfc_release_nvme_buf(phba, lpfc_ncmd);
}


/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * This routine initializes the IO WQE from the iwrite, iread, or
 * icmnd command template, based on the IO direction of the nvme
 * request carried in @lpfc_ncmd, and fills in the WQE fields that
 * are independent of the request buffer.
 *
 * Return value :
 *   0 - Success
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
                      struct lpfc_io_buf *lpfc_ncmd,
                      struct lpfc_nodelist *pnode,
                      struct lpfc_fc4_ctrl_stat *cstat)
{
        struct lpfc_hba *phba = vport->phba;
        struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
        struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
        union lpfc_wqe128 *wqe = &pwqeq->wqe;
        uint32_t req_len;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.
         */
        if (nCmd->sg_cnt) {
                if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
                        /* From the iwrite template, initialize words 7 - 11 */
                        memcpy(&wqe->words[7],
                               &lpfc_iwrite_cmd_template.words[7],
                               sizeof(uint32_t) * 5);

                        /* Word 4 */
                        wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

                        /* Word 5 */
                        if ((phba->cfg_nvme_enable_fb) &&
                            (pnode->nlp_flag & NLP_FIRSTBURST)) {
                                req_len = lpfc_ncmd->nvmeCmd->payload_length;
                                if (req_len < pnode->nvme_fb_size)
                                        wqe->fcp_iwrite.initial_xfer_len =
                                                req_len;
                                else
                                        wqe->fcp_iwrite.initial_xfer_len =
                                                pnode->nvme_fb_size;
                        } else {
                                wqe->fcp_iwrite.initial_xfer_len = 0;
                        }
                        cstat->output_requests++;
                } else {
                        /* From the iread template, initialize words 7 - 11 */
                        memcpy(&wqe->words[7],
                               &lpfc_iread_cmd_template.words[7],
                               sizeof(uint32_t) * 5);

                        /* Word 4 */
                        wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

                        /* Word 5 */
                        wqe->fcp_iread.rsrvd5 = 0;

                        cstat->input_requests++;
                }
        } else {
                /* From the icmnd template, initialize words 4 - 11 */
                memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
                       sizeof(uint32_t) * 8);
                cstat->control_requests++;
        }

        if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
                bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
        /*
         * Finish initializing those WQE fields that are independent
         * of the nvme_cmnd request_buffer
         */

        /* Word 3 */
        bf_set(payload_offset_len, &wqe->fcp_icmd,
               (nCmd->rsplen + nCmd->cmdlen));

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
               phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

        /* Word 8 */
        wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

        /* Word 10 */
        bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

        /* Words 13 14 15 are for PBDE support */

        pwqeq->vport = vport;
        return 0;
}


/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 *
 * This routine fixes up the command and response SGEs, then walks the
 * nvme request's scatter-gather list and formats one SLI4 data SGE
 * per segment, chaining additional SGL pages as needed.
 *
 * Return value :
 *   0 - Success
 *   1 - Failure
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
                      struct lpfc_io_buf *lpfc_ncmd)
{
        struct lpfc_hba *phba = vport->phba;
        struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
        union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
        struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
        struct sli4_hybrid_sgl *sgl_xtra = NULL;
        struct scatterlist *data_sg;
        struct sli4_sge *first_data_sgl;
        struct ulp_bde64 *bde;
        dma_addr_t physaddr = 0;
        uint32_t num_bde = 0;
        uint32_t dma_len = 0;
        uint32_t dma_offset = 0;
        int nseg, i, j;
        bool lsp_just_set = false;

        /* Fix up the command and response DMA stuff. */
        lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.
         */
        if (nCmd->sg_cnt) {
                /*
                 * Jump over the cmd and rsp SGEs.  The fix routine
                 * has already adjusted for this.
                 */
                sgl += 2;

                first_data_sgl = sgl;
                lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
                if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "6058 Too many sg segments from "
                                        "NVME Transport.  Max %d, "
                                        "nvmeIO sg_cnt %d\n",
                                        phba->cfg_nvme_seg_cnt + 1,
                                        lpfc_ncmd->seg_cnt);
                        lpfc_ncmd->seg_cnt = 0;
                        return 1;
                }

                /*
                 * The driver established a maximum scatter-gather segment count
                 * during probe that limits the number of sg elements in any
                 * single nvme command.  Just run through the seg_cnt and format
                 * the sge's.
                 */
                nseg = nCmd->sg_cnt;
                data_sg = nCmd->first_sgl;

                /* for tracking the segment boundaries; j counts SGEs consumed
                 * so far, including the two cmd/rsp SGEs already in place
                 */
                j = 2;
                for (i = 0; i < nseg; i++) {
                        if (data_sg == NULL) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                                "6059 dptr err %d, nseg %d\n",
                                                i, nseg);
                                lpfc_ncmd->seg_cnt = 0;
                                return 1;
                        }

                        sgl->word2 = 0;
                        if ((num_bde + 1) == nseg) {
                                bf_set(lpfc_sli4_sge_last, sgl, 1);
                                bf_set(lpfc_sli4_sge_type, sgl,
                                       LPFC_SGE_TYPE_DATA);
                        } else {
                                bf_set(lpfc_sli4_sge_last, sgl, 0);

                                /* expand the segment */
                                if (!lsp_just_set &&
                                    !((j + 1) % phba->border_sge_num) &&
                                    ((nseg - 1) != i)) {
                                        /* set LSP type */
                                        bf_set(lpfc_sli4_sge_type, sgl,
                                               LPFC_SGE_TYPE_LSP);

                                        sgl_xtra = lpfc_get_sgl_per_hdwq(
                                                        phba, lpfc_ncmd);

                                        if (unlikely(!sgl_xtra)) {
                                                lpfc_ncmd->seg_cnt = 0;
                                                return 1;
                                        }
                                        sgl->addr_lo = cpu_to_le32(putPaddrLow(
                                                       sgl_xtra->dma_phys_sgl));
                                        sgl->addr_hi = cpu_to_le32(putPaddrHigh(
                                                       sgl_xtra->dma_phys_sgl));

                                } else {
                                        bf_set(lpfc_sli4_sge_type, sgl,
                                               LPFC_SGE_TYPE_DATA);
                                }
                        }

                        if (!(bf_get(lpfc_sli4_sge_type, sgl) &
                                     LPFC_SGE_TYPE_LSP)) {
                                if ((nseg - 1) == i)
                                        bf_set(lpfc_sli4_sge_last, sgl, 1);

                                physaddr = data_sg->dma_address;
                                dma_len = data_sg->length;
                                sgl->addr_lo = cpu_to_le32(
                                                         putPaddrLow(physaddr));
                                sgl->addr_hi = cpu_to_le32(
                                                        putPaddrHigh(physaddr));

                                bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
                                sgl->word2 = cpu_to_le32(sgl->word2);
                                sgl->sge_len = cpu_to_le32(dma_len);

                                dma_offset += dma_len;
                                data_sg = sg_next(data_sg);

                                sgl++;

                                lsp_just_set = false;
                        } else {
                                sgl->word2 = cpu_to_le32(sgl->word2);

                                sgl->sge_len = cpu_to_le32(
                                                     phba->cfg_sg_dma_buf_size);

                                sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
                                i = i - 1;

                                lsp_just_set = true;
                        }

                        j++;
                }
                if (phba->cfg_enable_pbde) {
                        /* Use PBDE support for first SGL only, offset == 0 */
                        /* Words 13-15 */
                        bde = (struct ulp_bde64 *)
                                &wqe->words[13];
                        bde->addrLow = first_data_sgl->addr_lo;
                        bde->addrHigh = first_data_sgl->addr_hi;
1402                        bde->tus.f.bdeSize =
1403                                le32_to_cpu(first_data_sgl->sge_len);
1404                        bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1405                        bde->tus.w = cpu_to_le32(bde->tus.w);
1406
1407                        /* Word 11 */
1408                        bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
1409                } else {
1410                        memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
1411                        bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
1412                }
1413
1414        } else {
1415                lpfc_ncmd->seg_cnt = 0;
1416
1417                /* For this clause to be valid, the payload_length
1418         * and sg_cnt must both be zero.
1419                 */
1420                if (nCmd->payload_length != 0) {
1421                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1422                                        "6063 NVME DMA Prep Err: sg_cnt %d "
1423                                        "payload_length x%x\n",
1424                                        nCmd->sg_cnt, nCmd->payload_length);
1425                        return 1;
1426                }
1427        }
1428        return 0;
1429}
1430
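/*
 * A minimal sketch of the data-SGE formatting pattern used above, kept
 * deliberately simplified and illustrative only (error handling and the
 * offset bookkeeping are omitted).  Each mapped segment is copied into
 * an SGE, and the final SGE is flagged "last" so the hardware knows
 * where the list ends:
 *
 *	for (i = 0; i < nseg; i++, sg = sg_next(sg)) {
 *		sge->addr_lo = cpu_to_le32(putPaddrLow(sg_dma_address(sg)));
 *		sge->addr_hi = cpu_to_le32(putPaddrHigh(sg_dma_address(sg)));
 *		sge->sge_len = cpu_to_le32(sg_dma_len(sg));
 *		if (i == nseg - 1)
 *			bf_set(lpfc_sli4_sge_last, sge, 1);
 *		sge++;
 *	}
 *
 * When the on-chip SGL fills up, an LSP (link) SGE is written instead
 * of a data SGE; it points at an external SGL page obtained from
 * lpfc_get_sgl_per_hdwq() and formatting continues on that page, which
 * is why the loop above decrements i after emitting an LSP entry.
 */
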
1431/**
1432 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
1433 * @pnvme_lport: Pointer to the driver's local port data
1434 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
1435 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1436 * @pnvme_fcreq: IO request from nvme fc to driver.
1437 *
1438 * Driver registers this routine as its io request handler.  This
1439 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
1440 * data structure to the rport indicated in @lpfc_nvme_rport.
1441 *
1442 * Return value :
1443 *   0 - Success
1444 *   non-zero - Failure (-EBUSY, -EINVAL, -ENODEV, -ENOMEM, or a WQE status)
1445 **/
1446static int
1447lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1448                        struct nvme_fc_remote_port *pnvme_rport,
1449                        void *hw_queue_handle,
1450                        struct nvmefc_fcp_req *pnvme_fcreq)
1451{
1452        int ret = 0;
1453        int expedite = 0;
1454        int idx, cpu;
1455        struct lpfc_nvme_lport *lport;
1456        struct lpfc_fc4_ctrl_stat *cstat;
1457        struct lpfc_vport *vport;
1458        struct lpfc_hba *phba;
1459        struct lpfc_nodelist *ndlp;
1460        struct lpfc_io_buf *lpfc_ncmd;
1461        struct lpfc_nvme_rport *rport;
1462        struct lpfc_nvme_qhandle *lpfc_queue_info;
1463        struct lpfc_nvme_fcpreq_priv *freqpriv;
1464        struct nvme_common_command *sqe;
1465#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1466        uint64_t start = 0;
1467#endif
1468
1469        /* Validate pointers. LLDD fault handling with the transport is
1470         * subject to timing races.
1471         */
1472        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1473        if (unlikely(!lport)) {
1474                ret = -EINVAL;
1475                goto out_fail;
1476        }
1477
1478        vport = lport->vport;
1479
1480        if (unlikely(!hw_queue_handle)) {
1481                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1482                                 "6117 Fail IO, NULL hw_queue_handle\n");
1483                atomic_inc(&lport->xmt_fcp_err);
1484                ret = -EBUSY;
1485                goto out_fail;
1486        }
1487
1488        phba = vport->phba;
1489
1490        if (unlikely(vport->load_flag & FC_UNLOADING)) {
1491                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1492                                 "6124 Fail IO, Driver unload\n");
1493                atomic_inc(&lport->xmt_fcp_err);
1494                ret = -ENODEV;
1495                goto out_fail;
1496        }
1497
1498        freqpriv = pnvme_fcreq->private;
1499        if (unlikely(!freqpriv)) {
1500                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1501                                 "6158 Fail IO, NULL request data\n");
1502                atomic_inc(&lport->xmt_fcp_err);
1503                ret = -EINVAL;
1504                goto out_fail;
1505        }
1506
1507#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1508        if (phba->ktime_on)
1509                start = ktime_get_ns();
1510#endif
1511        rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1512        lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
1513
1514        /*
1515         * Catch race where our node has transitioned, but the
1516         * transport is still transitioning.
1517         */
1518        ndlp = rport->ndlp;
1519        if (!ndlp) {
1520                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1521                                 "6053 Busy IO, ndlp not ready: rport x%px "
1522                                  "ndlp x%px, DID x%06x\n",
1523                                 rport, ndlp, pnvme_rport->port_id);
1524                atomic_inc(&lport->xmt_fcp_err);
1525                ret = -EBUSY;
1526                goto out_fail;
1527        }
1528
1529        /* The remote node has to be a mapped target or it's an error. */
1530        if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1531            (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1532                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1533                                 "6036 Fail IO, DID x%06x not ready for "
1534                                 "IO. State x%x, Type x%x Flg x%x\n",
1535                                 pnvme_rport->port_id,
1536                                 ndlp->nlp_state, ndlp->nlp_type,
1537                                 ndlp->fc4_xpt_flags);
1538                atomic_inc(&lport->xmt_fcp_bad_ndlp);
1539                ret = -EBUSY;
1540                goto out_fail;
1541
1542        }
1543
1544        /* Currently only NVME Keep alive commands should be expedited
1545         * if the driver runs out of a resource. These should only be
1546         * issued on the admin queue, qidx 0.
1547         */
1548        if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
1549                sqe = &((struct nvme_fc_cmd_iu *)
1550                        pnvme_fcreq->cmdaddr)->sqe.common;
1551                if (sqe->opcode == nvme_admin_keep_alive)
1552                        expedite = 1;
1553        }
1554
1555        /* The node is shared with FCP IO; make sure the IO pending count does
1556         * not exceed the programmed depth.
1557         */
1558        if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1559                if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1560                    !expedite) {
1561                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1562                                         "6174 Fail IO, ndlp qdepth exceeded: "
1563                                         "idx %d DID %x pend %d qdepth %d\n",
1564                                         lpfc_queue_info->index, ndlp->nlp_DID,
1565                                         atomic_read(&ndlp->cmd_pending),
1566                                         ndlp->cmd_qdepth);
1567                        atomic_inc(&lport->xmt_fcp_qdepth);
1568                        ret = -EBUSY;
1569                        goto out_fail;
1570                }
1571        }
1572
1573        /* Look up hardware queue index based on fcp_io_sched module parameter */
1574        if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
1575                idx = lpfc_queue_info->index;
1576        } else {
1577                cpu = raw_smp_processor_id();
1578                idx = phba->sli4_hba.cpu_map[cpu].hdwq;
1579        }
1580
1581        lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
1582        if (lpfc_ncmd == NULL) {
1583                atomic_inc(&lport->xmt_fcp_noxri);
1584                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1585                                 "6065 Fail IO, driver buffer pool is empty: "
1586                                 "idx %d DID %x\n",
1587                                 lpfc_queue_info->index, ndlp->nlp_DID);
1588                ret = -EBUSY;
1589                goto out_fail;
1590        }
1591#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1592        if (start) {
1593                lpfc_ncmd->ts_cmd_start = start;
1594                lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1595        } else {
1596                lpfc_ncmd->ts_cmd_start = 0;
1597        }
1598#endif
1599
1600        /*
1601         * Store the data needed by the driver to issue, abort, and complete
1602         * an IO.
1603         * Do not let the IO hang out forever.  There is no midlayer issuing
1604         * an abort so inform the FW of the maximum IO pending time.
1605         */
1606        freqpriv->nvme_buf = lpfc_ncmd;
1607        lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1608        lpfc_ncmd->ndlp = ndlp;
1609        lpfc_ncmd->qidx = lpfc_queue_info->qidx;
1610
1611        /*
1612         * Issue the IO on the WQ indicated by index in the hw_queue_handle.
1613         * This identifier was created in our hardware queue create callback
1614         * routine. The driver is now dependent on the IO queue steering from
1615         * the transport.  We trust that the upper NVME layers know which
1616         * index to use and that they have affinitized a CPU to this hardware
1617         * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
1618         */
1619        lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
1620        cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
1621
1622        lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1623        ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1624        if (ret) {
1625                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1626                                 "6175 Fail IO, Prep DMA: "
1627                                 "idx %d DID %x\n",
1628                                 lpfc_queue_info->index, ndlp->nlp_DID);
1629                atomic_inc(&lport->xmt_fcp_err);
1630                ret = -ENOMEM;
1631                goto out_free_nvme_buf;
1632        }
1633
1634        lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1635                         lpfc_ncmd->cur_iocbq.sli4_xritag,
1636                         lpfc_queue_info->index, ndlp->nlp_DID);
1637
1638        ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
1639        if (ret) {
1640                atomic_inc(&lport->xmt_fcp_wqerr);
1641                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1642                                 "6113 Fail IO, Could not issue WQE err %x "
1643                                 "sid: x%x did: x%x oxid: x%x\n",
1644                                 ret, vport->fc_myDID, ndlp->nlp_DID,
1645                                 lpfc_ncmd->cur_iocbq.sli4_xritag);
1646                goto out_free_nvme_buf;
1647        }
1648
1649        if (phba->cfg_xri_rebalancing)
1650                lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
1651
1652#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1653        if (lpfc_ncmd->ts_cmd_start)
1654                lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1655
1656        if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
1657                cpu = raw_smp_processor_id();
1658                this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1659                lpfc_ncmd->cpu = cpu;
1660                if (idx != cpu)
1661                        lpfc_printf_vlog(vport,
1662                                         KERN_INFO, LOG_NVME_IOERR,
1663                                        "6702 CPU Check cmd: "
1664                                        "cpu %d wq %d\n",
1665                                        lpfc_ncmd->cpu,
1666                                        lpfc_queue_info->index);
1667        }
1668#endif
1669        return 0;
1670
1671 out_free_nvme_buf:
1672        if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1673                if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
1674                        cstat->output_requests--;
1675                else
1676                        cstat->input_requests--;
1677        } else
1678                cstat->control_requests--;
1679        lpfc_release_nvme_buf(phba, lpfc_ncmd);
1680 out_fail:
1681        return ret;
1682}
1683
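/*
 * A hedged sketch of the keep-alive "expedite" test in the submit path
 * above.  The NVMe SQE rides inside the FC command IU, so the opcode
 * can be inspected before any resources are committed (qidx here is
 * shorthand for lpfc_queue_info->qidx):
 *
 *	struct nvme_fc_cmd_iu *iu = pnvme_fcreq->cmdaddr;
 *	struct nvme_common_command *sqe = &iu->sqe.common;
 *
 *	if (!qidx && !pnvme_fcreq->sg_cnt &&
 *	    sqe->opcode == nvme_admin_keep_alive)
 *		expedite = 1;
 *
 * Keep-alives must make progress even when the IO buffer pool is
 * empty; otherwise transient resource pressure could tear down the
 * controller association.
 */
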
1684/**
1685 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
1686 * @phba: Pointer to HBA context object
1687 * @cmdiocb: Pointer to command iocb object.
1688 * @abts_cmpl: Pointer to wcqe complete object.
1689 *
1690 * This is the callback function for any NVME FCP IO that was aborted.
1691 *
1692 * Return value:
1693 *   None
1694 **/
1695void
1696lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1697                           struct lpfc_wcqe_complete *abts_cmpl)
1698{
1699        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1700                        "6145 ABORT_XRI_CN completing on rpi x%x "
1701                        "original iotag x%x, abort cmd iotag x%x "
1702                        "req_tag x%x, status x%x, hwstatus x%x\n",
1703                        cmdiocb->iocb.un.acxri.abortContextTag,
1704                        cmdiocb->iocb.un.acxri.abortIoTag,
1705                        cmdiocb->iotag,
1706                        bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1707                        bf_get(lpfc_wcqe_c_status, abts_cmpl),
1708                        bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
1709        lpfc_sli_release_iocbq(phba, cmdiocb);
1710}
1711
1712/**
1713 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1714 * @pnvme_lport: Pointer to the driver's local port data
1715 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
1716 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1717 * @pnvme_fcreq: IO request from nvme fc to driver.
1718 *
1719 * Driver registers this routine as its nvme request io abort handler.  This
1720 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
1721 * data structure to the rport indicated in @lpfc_nvme_rport.  This routine
1722 * is executed asynchronously - one the target is validated as "MAPPED" and
1723 * ready for IO, the driver issues the abort request and returns.
1724 *
1725 * Return value:
1726 *   None
1727 **/
1728static void
1729lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1730                    struct nvme_fc_remote_port *pnvme_rport,
1731                    void *hw_queue_handle,
1732                    struct nvmefc_fcp_req *pnvme_fcreq)
1733{
1734        struct lpfc_nvme_lport *lport;
1735        struct lpfc_vport *vport;
1736        struct lpfc_hba *phba;
1737        struct lpfc_io_buf *lpfc_nbuf;
1738        struct lpfc_iocbq *nvmereq_wqe;
1739        struct lpfc_nvme_fcpreq_priv *freqpriv;
1740        unsigned long flags;
1741        int ret_val;
1742
1743        /* Validate pointers. LLDD fault handling with the transport is
1744         * subject to timing races.
1745         */
1746        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1747        if (unlikely(!lport))
1748                return;
1749
1750        vport = lport->vport;
1751
1752        if (unlikely(!hw_queue_handle)) {
1753                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1754                                 "6129 Fail Abort, HW Queue Handle NULL.\n");
1755                return;
1756        }
1757
1758        phba = vport->phba;
1759        freqpriv = pnvme_fcreq->private;
1760
1761        if (unlikely(!freqpriv))
1762                return;
1763        if (vport->load_flag & FC_UNLOADING)
1764                return;
1765
1766        /* Log entry into the abort request handler. */
1767        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1768                         "6002 Abort Request to rport DID x%06x "
1769                         "for nvme_fc_req x%px\n",
1770                         pnvme_rport->port_id,
1771                         pnvme_fcreq);
1772
1773        /* If the hba is getting reset, this flag is set.  It is
1774         * cleared when the reset is complete and rings reestablished.
1775         */
1776        spin_lock_irqsave(&phba->hbalock, flags);
1777        /* driver queued commands are in process of being flushed */
1778        if (phba->hba_flag & HBA_IOQ_FLUSH) {
1779                spin_unlock_irqrestore(&phba->hbalock, flags);
1780                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1781                                 "6139 Driver in reset cleanup - flushing "
1782                                 "NVME Req now.  hba_flag x%x\n",
1783                                 phba->hba_flag);
1784                return;
1785        }
1786
1787        lpfc_nbuf = freqpriv->nvme_buf;
1788        if (!lpfc_nbuf) {
1789                spin_unlock_irqrestore(&phba->hbalock, flags);
1790                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1791                                 "6140 NVME IO req has no matching lpfc nvme "
1792                                 "io buffer.  Skipping abort req.\n");
1793                return;
1794        } else if (!lpfc_nbuf->nvmeCmd) {
1795                spin_unlock_irqrestore(&phba->hbalock, flags);
1796                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1797                                 "6141 lpfc NVME IO req has no nvme_fcreq "
1798                                 "io buffer.  Skipping abort req.\n");
1799                return;
1800        }
1801        nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
1802
1803        /* Guard against IO completion being called at the same time */
1804        spin_lock(&lpfc_nbuf->buf_lock);
1805
1806        /*
1807         * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1808         * state must match the nvme_fcreq passed by the nvme
1809         * transport.  If they don't match, it is likely the driver
1810         * has already completed the NVME IO and the nvme transport
1811         * has not seen it yet.
1812         */
1813        if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1814                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1815                                 "6143 NVME req mismatch: "
1816                                 "lpfc_nbuf x%px nvmeCmd x%px, "
1817                                 "pnvme_fcreq x%px.  Skipping Abort xri x%x\n",
1818                                 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1819                                 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1820                goto out_unlock;
1821        }
1822
1823        /* Don't abort IOs no longer on the pending queue. */
1824        if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1825                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1826                                 "6142 NVME IO req x%px not queued - skipping "
1827                                 "abort req xri x%x\n",
1828                                 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1829                goto out_unlock;
1830        }
1831
1832        atomic_inc(&lport->xmt_fcp_abort);
1833        lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1834                         nvmereq_wqe->sli4_xritag,
1835                         nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
1836
1837        /* Outstanding abort is in progress */
1838        if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1839                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1840                                 "6144 Outstanding NVME I/O Abort Request "
1841                                 "still pending on nvme_fcreq x%px, "
1842                                 "lpfc_ncmd %px xri x%x\n",
1843                                 pnvme_fcreq, lpfc_nbuf,
1844                                 nvmereq_wqe->sli4_xritag);
1845                goto out_unlock;
1846        }
1847
1848        ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
1849                                              lpfc_nvme_abort_fcreq_cmpl);
1850
1851        spin_unlock(&lpfc_nbuf->buf_lock);
1852        spin_unlock_irqrestore(&phba->hbalock, flags);
1853
1854        /* Make sure HBA is alive */
1855        lpfc_issue_hb_tmo(phba);
1856
1857        if (ret_val != WQE_SUCCESS) {
1858                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1859                                 "6137 Failed abts issue_wqe with status x%x "
1860                                 "for nvme_fcreq x%px.\n",
1861                                 ret_val, pnvme_fcreq);
1862                return;
1863        }
1864
1865        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1866                         "6138 Transport Abort NVME Request Issued for "
1867                         "ox_id x%x\n",
1868                         nvmereq_wqe->sli4_xritag);
1869        return;
1870
1871out_unlock:
1872        spin_unlock(&lpfc_nbuf->buf_lock);
1873        spin_unlock_irqrestore(&phba->hbalock, flags);
1874        return;
1875}
1876
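/*
 * A sketch of the lock ordering the abort path above shares with the
 * IO completion path (illustrative only):
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	spin_lock(&lpfc_nbuf->buf_lock);
 *	... verify nvmeCmd still matches and the IO is on the txcmplq ...
 *	spin_unlock(&lpfc_nbuf->buf_lock);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 *
 * Only after both checks pass is lpfc_sli4_issue_abort_iotag() called;
 * any mismatch means the IO already completed and the abort request is
 * silently dropped.
 */
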
1877/* Declare and initialize an instance of the FC NVME template. */
1878static struct nvme_fc_port_template lpfc_nvme_template = {
1879        /* initiator-based functions */
1880        .localport_delete  = lpfc_nvme_localport_delete,
1881        .remoteport_delete = lpfc_nvme_remoteport_delete,
1882        .create_queue = lpfc_nvme_create_queue,
1883        .delete_queue = lpfc_nvme_delete_queue,
1884        .ls_req       = lpfc_nvme_ls_req,
1885        .fcp_io       = lpfc_nvme_fcp_io_submit,
1886        .ls_abort     = lpfc_nvme_ls_abort,
1887        .fcp_abort    = lpfc_nvme_fcp_abort,
1888        .xmt_ls_rsp   = lpfc_nvme_xmt_ls_rsp,
1889
1890        .max_hw_queues = 1,
1891        .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1892        .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1893        .dma_boundary = 0xFFFFFFFF,
1894
1895        /* Sizes of additional private data for data structures.
1896         * Only lsrqst_priv_sz is unused at this time.
1897         */
1898        .local_priv_sz = sizeof(struct lpfc_nvme_lport),
1899        .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
1900        .lsrqst_priv_sz = 0,
1901        .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
1902};
1903
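/*
 * A hedged sketch of how this template is consumed: it is handed to the
 * FC transport once per vport (see lpfc_nvme_create_localport() below),
 * after which the transport invokes the function pointers for queue
 * setup, LS and FCP IO, aborts, and teardown:
 *
 *	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 *					 &phba->pcidev->dev, &localport);
 *
 * max_hw_queues and max_sgl_segments are only placeholders here; they
 * are overwritten from cfg_hdw_queue and cfg_nvme_seg_cnt just before
 * registration.
 */
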
1904/*
1905 * lpfc_get_nvme_buf - Get an nvme buffer from the HBA's io_buf_list
1906 *
1907 * This routine removes an nvme buffer from the head of the hardware
1908 * queue's io_buf_list and returns it to the caller.
1909 *
1910 * Return codes:
1911 *   NULL - Error
1912 *   Pointer to lpfc_io_buf - Success
1913 **/
1914static struct lpfc_io_buf *
1915lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1916                  int idx, int expedite)
1917{
1918        struct lpfc_io_buf *lpfc_ncmd;
1919        struct lpfc_sli4_hdw_queue *qp;
1920        struct sli4_sge *sgl;
1921        struct lpfc_iocbq *pwqeq;
1922        union lpfc_wqe128 *wqe;
1923
1924        lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
1925
1926        if (lpfc_ncmd) {
1927                pwqeq = &(lpfc_ncmd->cur_iocbq);
1928                wqe = &pwqeq->wqe;
1929
1930                /* Set up key fields in the buffer that may have been changed
1931                 * if other protocols used this buffer.
1932                 */
1933                pwqeq->iocb_flag = LPFC_IO_NVME;
1934                pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
1935                lpfc_ncmd->start_time = jiffies;
1936                lpfc_ncmd->flags = 0;
1937
1938                /* The rsp SGE will be filled in when we receive an IO
1939                 * from the NVME Layer to be sent.
1940                 * The cmd is going to be embedded so we need a SKIP SGE.
1941                 */
1942                sgl = lpfc_ncmd->dma_sgl;
1943                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1944                bf_set(lpfc_sli4_sge_last, sgl, 0);
1945                sgl->word2 = cpu_to_le32(sgl->word2);
1946                /* Fill in word 3 / sgl_len during cmd submission */
1947
1948                /* Initialize 64 bytes only */
1949                memset(wqe, 0, sizeof(union lpfc_wqe));
1950
1951                if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1952                        atomic_inc(&ndlp->cmd_pending);
1953                        lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
1954                }
1955
1956        } else {
1957                qp = &phba->sli4_hba.hdwq[idx];
1958                qp->empty_io_bufs++;
1959        }
1960
1961        return  lpfc_ncmd;
1962}
1963
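/*
 * A brief sketch of why the first SGE becomes a SKIP entry, as done
 * above (illustrative only).  The FCP command IU is embedded directly
 * in the WQE, so SGE 0 carries no data; the response SGE follows, and
 * data SGEs start at index 2 - hence the "sgl += 2" in
 * lpfc_nvme_prep_io_dma():
 *
 *	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
 *	bf_set(lpfc_sli4_sge_last, sgl, 0);
 *	sgl->word2 = cpu_to_le32(sgl->word2);
 */
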
1964/**
1965 * lpfc_release_nvme_buf - Return an nvme buffer back to the hba nvme buf list.
1966 * @phba: The Hba for which this call is being executed.
1967 * @lpfc_ncmd: The nvme buffer which is being released.
1968 *
1969 * This routine releases the @lpfc_ncmd nvme buffer by adding it to the
1970 * tail of the @phba lpfc_io_buf_list.  For SLI4, XRIs are tied to the
1971 * nvme buffer and cannot be reused for at least RA_TOV if the exchange
1972 * was aborted.
1973 **/
1974static void
1975lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
1976{
1977        struct lpfc_sli4_hdw_queue *qp;
1978        unsigned long iflag = 0;
1979
1980        if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
1981                atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
1982
1983        lpfc_ncmd->ndlp = NULL;
1984        lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
1985
1986        qp = lpfc_ncmd->hdwq;
1987        if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
1988                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1989                                "6310 XB release deferred for "
1990                                "ox_id x%x on reqtag x%x\n",
1991                                lpfc_ncmd->cur_iocbq.sli4_xritag,
1992                                lpfc_ncmd->cur_iocbq.iotag);
1993
1994                spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
1995                list_add_tail(&lpfc_ncmd->list,
1996                        &qp->lpfc_abts_io_buf_list);
1997                qp->abts_nvme_io_bufs++;
1998                spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
1999        } else
2000                lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
2001}
2002
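/*
 * A condensed view of the two release paths above (illustrative only).
 * If an ABTS is still outstanding for the exchange (LPFC_SBUF_XBUSY),
 * the buffer parks on the hardware queue's abort list and is recycled
 * only when the XRI_ABORTED CQE arrives (see
 * lpfc_sli4_nvme_xri_aborted() below); otherwise it returns directly
 * to the free pool:
 *
 *	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY)
 *		list_add_tail(&lpfc_ncmd->list, &qp->lpfc_abts_io_buf_list);
 *	else
 *		lpfc_release_io_buf(phba, lpfc_ncmd, qp);
 */
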
2003/**
2004 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2005 * @vport: the lpfc_vport instance requesting a localport.
2006 *
2007 * This routine is invoked to create an nvme localport instance to bind
2008 * to the nvme_fc_transport.  It is called once during driver load
2009 * like lpfc_create_shost after all other services are initialized.
2010 * It requires a vport, vpi, and wwns at call time.  Other localport
2011 * parameters are modified as the driver's FCID and the Fabric WWN
2012 * are established.
2013 *
2014 * Return codes
2015 *      0 - successful
2016 *      -ENOMEM - no heap memory available
2017 *      other values - from nvme registration upcall
2018 **/
2019int
2020lpfc_nvme_create_localport(struct lpfc_vport *vport)
2021{
2022        int ret = 0;
2023        struct lpfc_hba  *phba = vport->phba;
2024        struct nvme_fc_port_info nfcp_info;
2025        struct nvme_fc_local_port *localport;
2026        struct lpfc_nvme_lport *lport;
2027
2028        /* Initialize this localport instance.  The vport wwn usage ensures
2029         * that NPIV is accounted for.
2030         */
2031        memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2032        nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2033        nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2034        nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2035
2036        /* Tell the transport layer cfg_nvme_seg_cnt + 1 because it takes
2037         * page alignment into account.  When SGL space is allocated, the
2038         * driver allocates + 3: one for cmd, one for rsp, one for alignment.
2039         */
2040        lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2041
2042        /* Advertise how many hw queues we support based on cfg_hdw_queue,
2043         * which will not exceed cpu count.
2044         */
2045        lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
2046
2047        if (!IS_ENABLED(CONFIG_NVME_FC))
2048                return ret;
2049
2050        /* Only the localport pointer is on the stack; the registration
2051         * call heap-allocates the localport itself plus the private area.
2052         */
2053
2054        ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2055                                         &vport->phba->pcidev->dev, &localport);
2056        if (!ret) {
2057                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2058                                 "6005 Successfully registered local "
2059                                 "NVME port num %d, localP x%px, private "
2060                                 "x%px, sg_seg %d\n",
2061                                 localport->port_num, localport,
2062                                 localport->private,
2063                                 lpfc_nvme_template.max_sgl_segments);
2064
2065                /* Private is our lport size declared in the template. */
2066                lport = (struct lpfc_nvme_lport *)localport->private;
2067                vport->localport = localport;
2068                lport->vport = vport;
2069                vport->nvmei_support = 1;
2070
2071                atomic_set(&lport->xmt_fcp_noxri, 0);
2072                atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2073                atomic_set(&lport->xmt_fcp_qdepth, 0);
2074                atomic_set(&lport->xmt_fcp_err, 0);
2075                atomic_set(&lport->xmt_fcp_wqerr, 0);
2076                atomic_set(&lport->xmt_fcp_abort, 0);
2077                atomic_set(&lport->xmt_ls_abort, 0);
2078                atomic_set(&lport->xmt_ls_err, 0);
2079                atomic_set(&lport->cmpl_fcp_xb, 0);
2080                atomic_set(&lport->cmpl_fcp_err, 0);
2081                atomic_set(&lport->cmpl_ls_xb, 0);
2082                atomic_set(&lport->cmpl_ls_err, 0);
2083
2084                atomic_set(&lport->fc4NvmeLsRequests, 0);
2085                atomic_set(&lport->fc4NvmeLsCmpls, 0);
2086        }
2087
2088        return ret;
2089}
2090
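/*
 * A worked example of the SGL sizing above, assuming a hypothetical
 * cfg_nvme_seg_cnt of 64: the transport is told max_sgl_segments =
 * 64 + 1 = 65 to cover page alignment, while the driver's own SGL
 * allocation reserves 64 + 3 entries (one cmd SGE, one rsp SGE, and
 * one spare for the alignment case).
 */
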
2091#if (IS_ENABLED(CONFIG_NVME_FC))
2092/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
2093 *
2094 * The driver has to wait for the host nvme transport to callback
2095 * indicating the localport has successfully unregistered all
2096 * resources.  Since this is an uninterruptible wait, loop every ten
2097 * seconds and print a message indicating no progress.
2098 *
2099 * An uninterruptible wait is used because of the risk of transport-to-
2100 * driver state mismatch.
2101 */
2102static void
2103lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2104                           struct lpfc_nvme_lport *lport,
2105                           struct completion *lport_unreg_cmp)
2106{
2107        u32 wait_tmo;
2108        int ret, i, pending = 0;
2109        struct lpfc_sli_ring  *pring;
2110        struct lpfc_hba  *phba = vport->phba;
2111        struct lpfc_sli4_hdw_queue *qp;
2112        int abts_scsi, abts_nvme;
2113
2114        /* The host transport has to clean up and confirm, which requires
2115         * an indefinite wait.  Print a message if a 10 second wait expires
2116         * and renew the wait; an expiry here is unexpected.
2117         */
2118        wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2119        while (true) {
2120                ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2121                if (unlikely(!ret)) {
2122                        pending = 0;
2123                        abts_scsi = 0;
2124                        abts_nvme = 0;
2125                        for (i = 0; i < phba->cfg_hdw_queue; i++) {
2126                                qp = &phba->sli4_hba.hdwq[i];
2127                                pring = qp->io_wq->pring;
2128                                if (!pring)
2129                                        continue;
2130                                pending += pring->txcmplq_cnt;
2131                                abts_scsi += qp->abts_scsi_io_bufs;
2132                                abts_nvme += qp->abts_nvme_io_bufs;
2133                        }
2134                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2135                                         "6176 Lport x%px Localport x%px wait "
2136                                         "timed out. Pending %d [%d:%d]. "
2137                                         "Renewing.\n",
2138                                         lport, vport->localport, pending,
2139                                         abts_scsi, abts_nvme);
2140                        continue;
2141                }
2142                break;
2143        }
2144        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2145                         "6177 Lport x%px Localport x%px Complete Success\n",
2146                         lport, vport->localport);
2147}
2148#endif
2149
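/*
 * A minimal, self-contained sketch of the renewing-wait pattern used by
 * lpfc_nvme_lport_unreg_wait() above, reduced to the generic kernel
 * primitives (names here are illustrative):
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	while (!wait_for_completion_timeout(&done,
 *					    msecs_to_jiffies(10 * 1000)))
 *		pr_warn("unreg still pending, renewing wait\n");
 *
 * The wait is deliberately uninterruptible: returning early would let
 * the driver free state the transport still references.
 */
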
2150/**
2151 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2152 * @vport: pointer to a host virtual N_Port data structure
2153 *
2154 * This routine is invoked to destroy all lports bound to the phba.
2155 * The lport memory was allocated by the nvme fc transport and is
2156 * released there.  This routine ensures all rports bound to the
2157 * lport have been disconnected.
2158 *
2159 **/
2160void
2161lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2162{
2163#if (IS_ENABLED(CONFIG_NVME_FC))
2164        struct nvme_fc_local_port *localport;
2165        struct lpfc_nvme_lport *lport;
2166        int ret;
2167        DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2168
2169        if (vport->nvmei_support == 0)
2170                return;
2171
2172        localport = vport->localport;
2173        lport = (struct lpfc_nvme_lport *)localport->private;
2174
2175        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2176                         "6011 Destroying NVME localport x%px\n",
2177                         localport);
2178
2179        /* lport's rport list is clear.  Unregister
2180         * lport and release resources.
2181         */
2182        lport->lport_unreg_cmp = &lport_unreg_cmp;
2183        ret = nvme_fc_unregister_localport(localport);
2184
2185        /* Wait for completion.  This either blocks
2186         * indefinitely or succeeds
2187         */
2188        lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2189        vport->localport = NULL;
2190
2191        /* Regardless of the unregister upcall response, clear
2192         * nvmei_support.  All rports are unregistered and the
2193         * driver will clean up.
2194         */
2195        vport->nvmei_support = 0;
2196        if (ret == 0) {
2197                lpfc_printf_vlog(vport,
2198                                 KERN_INFO, LOG_NVME_DISC,
2199                                 "6009 Unregistered lport Success\n");
2200        } else {
2201                lpfc_printf_vlog(vport,
2202                                 KERN_INFO, LOG_NVME_DISC,
2203                                 "6010 Unregistered lport "
2204                                 "Failed, status x%x\n",
2205                                 ret);
2206        }
2207#endif
2208}
2209
2210void
2211lpfc_nvme_update_localport(struct lpfc_vport *vport)
2212{
2213#if (IS_ENABLED(CONFIG_NVME_FC))
2214        struct nvme_fc_local_port *localport;
2215        struct lpfc_nvme_lport *lport;
2216
2217        localport = vport->localport;
2218        if (!localport) {
2219                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2220                                 "6710 Update NVME fail. No localport\n");
2221                return;
2222        }
2223        lport = (struct lpfc_nvme_lport *)localport->private;
2224        if (!lport) {
2225                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2226                                 "6171 Update NVME fail. localP x%px, No lport\n",
2227                                 localport);
2228                return;
2229        }
2230        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2231                         "6012 Update NVME lport x%px did x%x\n",
2232                         localport, vport->fc_myDID);
2233
2234        localport->port_id = vport->fc_myDID;
2235        if (localport->port_id == 0)
2236                localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2237        else
2238                localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2239
2240        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2241                         "6030 bound lport x%px to DID x%06x\n",
2242                         lport, localport->port_id);
2243#endif
2244}
2245
2246int
2247lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2248{
2249#if (IS_ENABLED(CONFIG_NVME_FC))
2250        int ret = 0;
2251        struct nvme_fc_local_port *localport;
2252        struct lpfc_nvme_lport *lport;
2253        struct lpfc_nvme_rport *rport;
2254        struct lpfc_nvme_rport *oldrport;
2255        struct nvme_fc_remote_port *remote_port;
2256        struct nvme_fc_port_info rpinfo;
2257        struct lpfc_nodelist *prev_ndlp = NULL;
2258        struct fc_rport *srport = ndlp->rport;
2259
2260        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2261                         "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2262                         ndlp->nlp_DID, ndlp->nlp_type);
2263
2264        localport = vport->localport;
2265        if (!localport)
2266                return 0;
2267
2268        lport = (struct lpfc_nvme_lport *)localport->private;
2269
2270        /* NVME rports are not preserved across devloss.
2271         * Just register this instance.  Note, rpinfo.dev_loss_tmo is set
2272         * from the SCSI rport when available, else to cfg_devloss_tmo.  The
2273         * driver communicates port role capabilities consistent
2274         * with the PRLI response data.
2275         */
2276        memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2277        rpinfo.port_id = ndlp->nlp_DID;
2278        if (ndlp->nlp_type & NLP_NVME_TARGET)
2279                rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2280        if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2281                rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2282
2283        if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2284                rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2285
2286        rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2287        rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2288        if (srport)
2289                rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
2290        else
2291                rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
2292
2293        spin_lock_irq(&ndlp->lock);
2294        oldrport = lpfc_ndlp_get_nrport(ndlp);
2295        if (oldrport) {
2296                prev_ndlp = oldrport->ndlp;
2297                spin_unlock_irq(&ndlp->lock);
2298        } else {
2299                spin_unlock_irq(&ndlp->lock);
2300                if (!lpfc_nlp_get(ndlp)) {
2301                        dev_warn(&vport->phba->pcidev->dev,
2302                                 "Warning - No node ref - exit register\n");
2303                        return 0;
2304                }
2305        }
2306
2307        ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2308        if (!ret) {
2309                /* If the ndlp already has an nrport, this is just
2310                 * a resume of the existing rport.  Else this is a
2311                 * new rport.
2312                 */
2313                /* Guard against an unregister/reregister
2314                 * race that leaves the WAIT flag set.
2315                 */
2316                spin_lock_irq(&ndlp->lock);
2317                ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
2318                ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
2319                spin_unlock_irq(&ndlp->lock);
2320                rport = remote_port->private;
2321                if (oldrport) {
2322
2323                        /* Sever the ndlp<->rport association
2324                         * before dropping the ndlp ref from
2325                         * register.
2326                         */
2327                        spin_lock_irq(&ndlp->lock);
2328                        ndlp->nrport = NULL;
2329                        ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
2330                        spin_unlock_irq(&ndlp->lock);
2331                        rport->ndlp = NULL;
2332                        rport->remoteport = NULL;
2333
2334                        /* Reference only removed if previous NDLP is no longer
2335                         * active. It might be just a swap and removing the
2336                         * reference would cause a premature cleanup.
2337                         */
2338                        if (prev_ndlp && prev_ndlp != ndlp) {
2339                                if (!prev_ndlp->nrport)
2340                                        lpfc_nlp_put(prev_ndlp);
2341                        }
2342                }
2343
2344                /* Cleanly bind the rport to the ndlp. */
2345                rport->remoteport = remote_port;
2346                rport->lport = lport;
2347                rport->ndlp = ndlp;
2348                spin_lock_irq(&ndlp->lock);
2349                ndlp->nrport = rport;
2350                spin_unlock_irq(&ndlp->lock);
2351                lpfc_printf_vlog(vport, KERN_INFO,
2352                                 LOG_NVME_DISC | LOG_NODE,
2353                                 "6022 Bind lport x%px to remoteport x%px "
2354                                 "rport x%px WWNN 0x%llx, "
2355                                 "Rport WWPN 0x%llx DID "
2356                                 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
2357                                 lport, remote_port, rport,
2358                                 rpinfo.node_name, rpinfo.port_name,
2359                                 rpinfo.port_id, rpinfo.port_role,
2360                                 ndlp, prev_ndlp);
2361        } else {
2362                lpfc_printf_vlog(vport, KERN_ERR,
2363                                 LOG_TRACE_EVENT,
2364                                 "6031 RemotePort Registration failed "
2365                                 "err: %d, DID x%06x\n",
2366                                 ret, ndlp->nlp_DID);
2367        }
2368
2369        return ret;
2370#else
2371        return 0;
2372#endif
2373}
2374
2375/*
2376 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
2377 *
2378 * If the ndlp represents an NVME Target that we are logged into,
2379 * ping the NVME FC Transport layer to initiate a device rescan
2380 * on this remote NPort.
2381 */
2382void
2383lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2384{
2385#if (IS_ENABLED(CONFIG_NVME_FC))
2386        struct lpfc_nvme_rport *nrport;
2387        struct nvme_fc_remote_port *remoteport = NULL;
2388
2389        spin_lock_irq(&ndlp->lock);
2390        nrport = lpfc_ndlp_get_nrport(ndlp);
2391        if (nrport)
2392                remoteport = nrport->remoteport;
2393        spin_unlock_irq(&ndlp->lock);
2394
2395        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2396                         "6170 Rescan NPort DID x%06x type x%x "
2397                         "state x%x nrport x%px remoteport x%px\n",
2398                         ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
2399                         nrport, remoteport);
2400
2401        if (!nrport || !remoteport)
2402                goto rescan_exit;
2403
2404        /* Only rescan if we are an NVME target in the MAPPED state */
2405        if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
2406            ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
2407                nvme_fc_rescan_remoteport(remoteport);
2408
2409                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2410                                 "6172 NVME rescanned DID x%06x "
2411                                 "port_state x%x\n",
2412                                 ndlp->nlp_DID, remoteport->port_state);
2413        }
2414        return;
2415 rescan_exit:
2416        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2417                         "6169 Skip NVME Rport Rescan, NVME remoteport "
2418                         "unregistered\n");
2419#endif
2420}
2421
2422/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2423 *
2424 * There is no notion of Devloss or rport recovery from the current
2425 * nvme_transport perspective.  Loss of an rport just means IO cannot
2426 * be sent and recovery is completely up to the initiator.
2427 * For now, the driver just unbinds the DID and port_role so that
2428 * no further IO can be issued.  Changes are planned for later.
2429 *
2430 * Notes - the ndlp reference count is not decremented here since
2431 * there is no nvme_transport api for devloss.  Node ref count
2432 * is only adjusted in driver unload.
2433 */
2434void
2435lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2436{
2437#if (IS_ENABLED(CONFIG_NVME_FC))
2438        int ret;
2439        struct nvme_fc_local_port *localport;
2440        struct lpfc_nvme_lport *lport;
2441        struct lpfc_nvme_rport *rport;
2442        struct nvme_fc_remote_port *remoteport = NULL;
2443
2444        localport = vport->localport;
2445
2446        /* This is a fundamental error.  The localport is always
2447         * available until driver unload.  Just exit.
2448         */
2449        if (!localport)
2450                return;
2451
2452        lport = (struct lpfc_nvme_lport *)localport->private;
2453        if (!lport)
2454                goto input_err;
2455
2456        spin_lock_irq(&ndlp->lock);
2457        rport = lpfc_ndlp_get_nrport(ndlp);
2458        if (rport)
2459                remoteport = rport->remoteport;
2460        spin_unlock_irq(&ndlp->lock);
2461        if (!remoteport)
2462                goto input_err;
2463
2464        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2465                         "6033 Unreg nvme remoteport x%px, portname x%llx, "
2466                         "port_id x%06x, portstate x%x port type x%x "
2467                         "refcnt %d\n",
2468                         remoteport, remoteport->port_name,
2469                         remoteport->port_id, remoteport->port_state,
2470                         ndlp->nlp_type, kref_read(&ndlp->kref));
2471
2472        /* Sanity check ndlp type.  Only call for NVME ports. Don't
2473         * clear any rport state until the transport calls back.
2474         */
2475
2476        if (ndlp->nlp_type & NLP_NVME_TARGET) {
2477                /* No concern about the role change on the nvme remoteport.
2478                 * The transport will update it.
2479                 */
2480                spin_lock_irq(&vport->phba->hbalock);
2481                ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG;
2482                spin_unlock_irq(&vport->phba->hbalock);
2483
2484                /* Don't let the host nvme transport keep sending keep-alives
2485                 * on this remoteport. Vport is unloading, no recovery. The
2486         * return value is ignored.  The upcall is a courtesy to the
2487                 * transport.
2488                 */
2489                if (vport->load_flag & FC_UNLOADING)
2490                        (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2491
2492                ret = nvme_fc_unregister_remoteport(remoteport);
2493
2494        /* The driver no longer knows if the nrport memory is valid
2495                 * because the controller teardown process has begun and
2496                 * is asynchronous.  Break the binding in the ndlp. Also
2497                 * remove the register ndlp reference to setup node release.
2498                 */
2499                ndlp->nrport = NULL;
2500                lpfc_nlp_put(ndlp);
2501                if (ret != 0) {
2502                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2503                                         "6167 NVME unregister failed %d "
2504                                         "port_state x%x\n",
2505                                         ret, remoteport->port_state);
2506                }
2507        }
2508        return;
2509
2510 input_err:
2511#endif
2512        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2513                         "6168 State error: lport x%px, rport x%px FCID x%06x\n",
2514                         vport->localport, ndlp->rport, ndlp->nlp_DID);
2515}
2516
2517/**
2518 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2519 * @phba: pointer to lpfc hba data structure.
2520 * @axri: pointer to the fcp xri abort wcqe structure.
2521 * @lpfc_ncmd: The nvme job structure for the request being aborted.
2522 *
2523 * This routine is invoked by the worker thread to process a SLI4 fast-path
2524 * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
2525 * here.
2526 **/
2527void
2528lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2529                           struct sli4_wcqe_xri_aborted *axri,
2530                           struct lpfc_io_buf *lpfc_ncmd)
2531{
2532        uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2533        struct nvmefc_fcp_req *nvme_cmd = NULL;
2534        struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
2535
2536
2537        if (ndlp)
2538                lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2539
2540        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2541                        "6311 nvme_cmd %p xri x%x tag x%x abort complete and "
2542                        "xri released\n",
2543                        lpfc_ncmd->nvmeCmd, xri,
2544                        lpfc_ncmd->cur_iocbq.iotag);
2545
2546        /* Aborted NVME commands are required to not complete
2547         * before the abort exchange command fully completes.
2548         * Once that happens, the buffer is returned via the put list.
2549         */
2550        if (lpfc_ncmd->nvmeCmd) {
2551                nvme_cmd = lpfc_ncmd->nvmeCmd;
2552                nvme_cmd->done(nvme_cmd);
2553                lpfc_ncmd->nvmeCmd = NULL;
2554        }
2555        lpfc_release_nvme_buf(phba, lpfc_ncmd);
2556}
2557
2558/**
2559 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2560 * @phba: Pointer to HBA context object.
2561 *
2562 * This function flushes all wqes in the nvme rings and frees all resources
2563 * in the txcmplq. This function does not issue abort wqes for the IO
2564 * commands in txcmplq, they will just be returned with
2565 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
2566 * slot has been permanently disabled.
2567 **/
2568void
2569lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2570{
2571        struct lpfc_sli_ring  *pring;
2572        u32 i, wait_cnt = 0;
2573
2574        if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
2575                return;
2576
2577        /* Cycle through all IO rings and make sure all outstanding
2578         * WQEs have been removed from the txcmplqs.
2579         */
2580        for (i = 0; i < phba->cfg_hdw_queue; i++) {
2581                if (!phba->sli4_hba.hdwq[i].io_wq)
2582                        continue;
2583                pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2584
2585                if (!pring)
2586                        continue;
2587
2588                /* Retrieve everything on the txcmplq */
2589                while (!list_empty(&pring->txcmplq)) {
2590                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2591                        wait_cnt++;
2592
2593                        /* The sleep is 10 ms.  Every ten seconds,
2594                         * dump a message.  Something is wrong.
2595                         */
2596                        if ((wait_cnt % 1000) == 0) {
2597                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2598                                                "6178 NVME IO not empty, "
2599                                                "cnt %d\n", wait_cnt);
2600                        }
2601                }
2602        }
2603
2604        /* Make sure HBA is alive */
2605        lpfc_issue_hb_tmo(phba);
2606
2607}
2608
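/*
 * The drain above is a simple poll, sketched here in reduced form:
 * sleep 10 ms per pass (LPFC_XRI_EXCH_BUSY_WAIT_T1) and log every 1000
 * passes, i.e. roughly every ten seconds, rather than enforcing a hard
 * timeout:
 *
 *	while (!list_empty(&pring->txcmplq)) {
 *		msleep(10);
 *		if ((++wait_cnt % 1000) == 0)
 *			pr_err("txcmplq still not empty\n");
 *	}
 */
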
2609void
2610lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
2611                      uint32_t stat, uint32_t param)
2612{
2613#if (IS_ENABLED(CONFIG_NVME_FC))
2614        struct lpfc_io_buf *lpfc_ncmd;
2615        struct nvmefc_fcp_req *nCmd;
2616        struct lpfc_wcqe_complete wcqe;
2617        struct lpfc_wcqe_complete *wcqep = &wcqe;
2618
2619        lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
2620        if (!lpfc_ncmd) {
2621                lpfc_sli_release_iocbq(phba, pwqeIn);
2622                return;
2623        }
2624        /* For an abort iocb just return; the IO iocb will do a done call */
2625        if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
2626            CMD_ABORT_XRI_CX) {
2627                lpfc_sli_release_iocbq(phba, pwqeIn);
2628                return;
2629        }
2630
2631        spin_lock(&lpfc_ncmd->buf_lock);
2632        nCmd = lpfc_ncmd->nvmeCmd;
2633        if (!nCmd) {
2634                spin_unlock(&lpfc_ncmd->buf_lock);
2635                lpfc_release_nvme_buf(phba, lpfc_ncmd);
2636                return;
2637        }
2638        spin_unlock(&lpfc_ncmd->buf_lock);
2639
2640        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2641                        "6194 NVME Cancel xri %x\n",
2642                        lpfc_ncmd->cur_iocbq.sli4_xritag);
2643
2644        wcqep->word0 = 0;
2645        bf_set(lpfc_wcqe_c_status, wcqep, stat);
2646        wcqep->parameter = param;
2647        wcqep->word3 = 0; /* xb is 0 */
2648
2649        /* Call release with XB=1 to queue the IO into the abort list. */
2650        if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
2651                bf_set(lpfc_wcqe_c_xb, wcqep, 1);
2652
2653        (pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep);
2654#endif
2655}
2656
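/*
 * A hedged sketch of the synthesized completion performed by
 * lpfc_nvme_cancel_iocb() above.  A local WCQE is filled with the
 * caller's status and parameter and handed to the IO's normal
 * completion handler, so cancelled IOs travel the same path as real
 * hardware completions:
 *
 *	struct lpfc_wcqe_complete wcqe = { 0 };
 *
 *	bf_set(lpfc_wcqe_c_status, &wcqe, stat);
 *	wcqe.parameter = param;
 *	(pwqeIn->wqe_cmpl)(phba, pwqeIn, &wcqe);
 *
 * Setting XB=1 when the SLI is active routes the buffer onto the abort
 * list instead of releasing it immediately.
 */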