linux/drivers/scsi/lpfc/lpfc_nvmet.c
   1/*******************************************************************
   2 * This file is part of the Emulex Linux Device Driver for         *
   3 * Fibre Channel Host Bus Adapters.                                *
   4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
   5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
   6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
   7 * EMULEX and SLI are trademarks of Emulex.                        *
   8 * www.broadcom.com                                                *
   9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  10 *                                                                 *
  11 * This program is free software; you can redistribute it and/or   *
  12 * modify it under the terms of version 2 of the GNU General       *
  13 * Public License as published by the Free Software Foundation.    *
  14 * This program is distributed in the hope that it will be useful. *
  15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
  16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
  17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
  18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
  20 * more details, a copy of which can be found in the file COPYING  *
  21 * included with this package.                                     *
  22 ********************************************************************/
  23#include <linux/pci.h>
  24#include <linux/slab.h>
  25#include <linux/interrupt.h>
  26#include <linux/delay.h>
  27#include <asm/unaligned.h>
  28#include <linux/crc-t10dif.h>
  29#include <net/checksum.h>
  30
  31#include <scsi/scsi.h>
  32#include <scsi/scsi_device.h>
  33#include <scsi/scsi_eh.h>
  34#include <scsi/scsi_host.h>
  35#include <scsi/scsi_tcq.h>
  36#include <scsi/scsi_transport_fc.h>
  37#include <scsi/fc/fc_fs.h>
  38
  39#include "lpfc_version.h"
  40#include "lpfc_hw4.h"
  41#include "lpfc_hw.h"
  42#include "lpfc_sli.h"
  43#include "lpfc_sli4.h"
  44#include "lpfc_nl.h"
  45#include "lpfc_disc.h"
  46#include "lpfc.h"
  47#include "lpfc_scsi.h"
  48#include "lpfc_nvme.h"
  49#include "lpfc_logmsg.h"
  50#include "lpfc_crtn.h"
  51#include "lpfc_vport.h"
  52#include "lpfc_debugfs.h"
  53
  54static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
  55                                                 struct lpfc_async_xchg_ctx *,
  56                                                 dma_addr_t rspbuf,
  57                                                 uint16_t rspsize);
  58static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
  59                                                  struct lpfc_async_xchg_ctx *);
  60static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
  61                                          struct lpfc_async_xchg_ctx *,
  62                                          uint32_t, uint16_t);
  63static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
  64                                            struct lpfc_async_xchg_ctx *,
  65                                            uint32_t, uint16_t);
  66static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
  67                                    struct lpfc_async_xchg_ctx *);
  68static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
  69
  70static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
  71
  72static union lpfc_wqe128 lpfc_tsend_cmd_template;
  73static union lpfc_wqe128 lpfc_treceive_cmd_template;
  74static union lpfc_wqe128 lpfc_trsp_cmd_template;
  75
  76/* Setup WQE templates for NVME IOs */
  77void
  78lpfc_nvmet_cmd_template(void)
  79{
  80        union lpfc_wqe128 *wqe;
  81
  82        /* TSEND template */
  83        wqe = &lpfc_tsend_cmd_template;
  84        memset(wqe, 0, sizeof(union lpfc_wqe128));
  85
  86        /* Word 0, 1, 2 - BDE is variable */
  87
  88        /* Word 3 - payload_offset_len is zero */
  89
  90        /* Word 4 - relative_offset is variable */
  91
  92        /* Word 5 - is zero */
  93
  94        /* Word 6 - ctxt_tag, xri_tag is variable */
  95
  96        /* Word 7 - wqe_ar is variable */
  97        bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
  98        bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
  99        bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
 100        bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
 101        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
 102
 103        /* Word 8 - abort_tag is variable */
 104
 105        /* Word 9  - reqtag, rcvoxid is variable */
 106
 107        /* Word 10 - wqes, xc is variable */
 108        bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
 109        bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
 110        bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
 111        bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
 112        bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
 113        bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
 114
 115        /* Word 11 - sup, irsp, irsplen is variable */
 116        bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
 117        bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 118        bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
 119        bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
 120        bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
 121        bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
 122
 123        /* Word 12 - fcp_data_len is variable */
 124
 125        /* Word 13, 14, 15 - PBDE is zero */
 126
 127        /* TRECEIVE template */
 128        wqe = &lpfc_treceive_cmd_template;
 129        memset(wqe, 0, sizeof(union lpfc_wqe128));
 130
 131        /* Word 0, 1, 2 - BDE is variable */
 132
 133        /* Word 3 */
 134        wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
 135
 136        /* Word 4 - relative_offset is variable */
 137
 138        /* Word 5 - is zero */
 139
 140        /* Word 6 - ctxt_tag, xri_tag is variable */
 141
 142        /* Word 7 */
 143        bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
 144        bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
 145        bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
 146        bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
 147        bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
 148
 149        /* Word 8 - abort_tag is variable */
 150
 151        /* Word 9  - reqtag, rcvoxid is variable */
 152
 153        /* Word 10 - xc is variable */
 154        bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
 155        bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
 156        bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
 157        bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
 158        bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
  159        bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
 160
 161        /* Word 11 - pbde is variable */
 162        bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
 163        bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 164        bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
 165        bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
 166        bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
 167        bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
 168
 169        /* Word 12 - fcp_data_len is variable */
 170
 171        /* Word 13, 14, 15 - PBDE is variable */
 172
 173        /* TRSP template */
 174        wqe = &lpfc_trsp_cmd_template;
 175        memset(wqe, 0, sizeof(union lpfc_wqe128));
 176
 177        /* Word 0, 1, 2 - BDE is variable */
 178
 179        /* Word 3 - response_len is variable */
 180
 181        /* Word 4, 5 - is zero */
 182
 183        /* Word 6 - ctxt_tag, xri_tag is variable */
 184
 185        /* Word 7 */
 186        bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
 187        bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
 188        bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
 189        bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
 190        bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
 191
 192        /* Word 8 - abort_tag is variable */
 193
 194        /* Word 9  - reqtag is variable */
 195
 196        /* Word 10 wqes, xc is variable */
 197        bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
 198        bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
 199        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
 200        bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
 201        bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
 202        bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
 203
 204        /* Word 11 irsp, irsplen is variable */
 205        bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
 206        bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 207        bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
 208        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
 209        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
 210        bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
 211
 212        /* Word 12, 13, 14, 15 - is zero */
 213}
 214
 215#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
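/**
 * lpfc_nvmet_get_ctx_for_xri - Find an active exchange context by XRI
 * @phba: Pointer to HBA context object.
 * @xri: SLI4 XRI to search for.
 *
 * Walks the target-active context list under t_active_list_lock and returns
 * the context whose buffer owns @xri, or NULL if no match is found.
 **/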
 216static struct lpfc_async_xchg_ctx *
 217lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
 218{
 219        struct lpfc_async_xchg_ctx *ctxp;
 220        unsigned long iflag;
 221        bool found = false;
 222
 223        spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
 224        list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
 225                if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
 226                        continue;
 227
 228                found = true;
 229                break;
 230        }
 231        spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
 232        if (found)
 233                return ctxp;
 234
 235        return NULL;
 236}
 237
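/**
 * lpfc_nvmet_get_ctx_for_oxid - Find an active exchange context by OX_ID
 * @phba: Pointer to HBA context object.
 * @oxid: Originator exchange id of the received command.
 * @sid: Source (initiator) FC_ID of the received command.
 *
 * Walks the target-active context list under t_active_list_lock and returns
 * the context matching both @oxid and @sid, or NULL if no match is found.
 **/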
 238static struct lpfc_async_xchg_ctx *
 239lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
 240{
 241        struct lpfc_async_xchg_ctx *ctxp;
 242        unsigned long iflag;
 243        bool found = false;
 244
 245        spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
 246        list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
 247                if (ctxp->oxid != oxid || ctxp->sid != sid)
 248                        continue;
 249
 250                found = true;
 251                break;
 252        }
 253        spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
 254        if (found)
 255                return ctxp;
 256
 257        return NULL;
 258}
 259#endif
 260
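/**
 * lpfc_nvmet_defer_release - Defer final context release to the abort path
 * @phba: Pointer to HBA context object.
 * @ctxp: Exchange context whose release is being deferred.
 *
 * Called with ctxp->ctxlock held.  Marks the context with LPFC_NVME_CTX_RLS
 * and moves it from the target-active list to the aborted-context list so
 * that the abort completion path performs the final release.
 **/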
 261static void
 262lpfc_nvmet_defer_release(struct lpfc_hba *phba,
 263                        struct lpfc_async_xchg_ctx *ctxp)
 264{
 265        lockdep_assert_held(&ctxp->ctxlock);
 266
 267        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 268                        "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
 269                        ctxp->oxid, ctxp->flag);
 270
 271        if (ctxp->flag & LPFC_NVME_CTX_RLS)
 272                return;
 273
 274        ctxp->flag |= LPFC_NVME_CTX_RLS;
 275        spin_lock(&phba->sli4_hba.t_active_list_lock);
 276        list_del(&ctxp->list);
 277        spin_unlock(&phba->sli4_hba.t_active_list_lock);
 278        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 279        list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
 280        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 281}
 282
 283/**
 284 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
 285 *         transmission of an NVME LS response.
 286 * @phba: Pointer to HBA context object.
 287 * @cmdwqe: Pointer to driver command WQE object.
 288 * @wcqe: Pointer to driver response CQE object.
 289 *
 290 * The function is called from SLI ring event handler with no
 291 * lock held. The function frees memory resources used for the command
 292 * used to send the NVME LS RSP.
 293 **/
 294void
 295__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 296                           struct lpfc_wcqe_complete *wcqe)
 297{
 298        struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
 299        struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
 300        uint32_t status, result;
 301
 302        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
 303        result = wcqe->parameter;
 304
 305        if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
 306                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 307                                "6410 NVMEx LS cmpl state mismatch IO x%x: "
 308                                "%d %d\n",
 309                                axchg->oxid, axchg->state, axchg->entry_cnt);
 310        }
 311
 312        lpfc_nvmeio_data(phba, "NVMEx LS  CMPL: xri x%x stat x%x result x%x\n",
 313                         axchg->oxid, status, result);
 314
 315        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
 316                        "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
 317                        status, result, axchg->oxid);
 318
 319        lpfc_nlp_put(cmdwqe->context1);
 320        cmdwqe->context2 = NULL;
 321        cmdwqe->context3 = NULL;
 322        lpfc_sli_release_iocbq(phba, cmdwqe);
 323        ls_rsp->done(ls_rsp);
 324        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
 325                        "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
 326                        status, axchg->oxid);
 327        kfree(axchg);
 328}
 329
 330/**
 331 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 332 * @phba: Pointer to HBA context object.
 333 * @cmdwqe: Pointer to driver command WQE object.
 334 * @wcqe: Pointer to driver response CQE object.
 335 *
 336 * The function is called from SLI ring event handler with no
  337 * lock held. This function is the completion handler for NVME LS commands.
  338 * The function updates any states and statistics, then calls the
 339 * generic completion handler to free resources.
 340 **/
 341static void
 342lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 343                          struct lpfc_wcqe_complete *wcqe)
 344{
 345        struct lpfc_nvmet_tgtport *tgtp;
 346        uint32_t status, result;
 347
 348        if (!phba->targetport)
 349                goto finish;
 350
 351        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
 352        result = wcqe->parameter;
 353
 354        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 355        if (tgtp) {
 356                if (status) {
 357                        atomic_inc(&tgtp->xmt_ls_rsp_error);
 358                        if (result == IOERR_ABORT_REQUESTED)
 359                                atomic_inc(&tgtp->xmt_ls_rsp_aborted);
 360                        if (bf_get(lpfc_wcqe_c_xb, wcqe))
 361                                atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
 362                } else {
 363                        atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
 364                }
 365        }
 366
 367finish:
 368        __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
 369}
 370
 371/**
  372 * lpfc_nvmet_ctxbuf_post - Repost an NVMET RQ DMA buffer and clean up context
  373 * @phba: HBA the buffer is associated with
 374 * @ctx_buf: ctx buffer context
 375 *
  376 * Description: Frees the given DMA buffer by reposting it to its
  377 * associated RQ so it can be reused.
 378 *
 379 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 380 *
 381 * Returns: None
 382 **/
 383void
 384lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 385{
 386#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 387        struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
 388        struct lpfc_nvmet_tgtport *tgtp;
 389        struct fc_frame_header *fc_hdr;
 390        struct rqb_dmabuf *nvmebuf;
 391        struct lpfc_nvmet_ctx_info *infop;
 392        uint32_t size, oxid, sid;
 393        int cpu;
 394        unsigned long iflag;
 395
 396        if (ctxp->state == LPFC_NVME_STE_FREE) {
 397                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 398                                "6411 NVMET free, already free IO x%x: %d %d\n",
 399                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
 400        }
 401
 402        if (ctxp->rqb_buffer) {
 403                spin_lock_irqsave(&ctxp->ctxlock, iflag);
 404                nvmebuf = ctxp->rqb_buffer;
 405                /* check if freed in another path whilst acquiring lock */
 406                if (nvmebuf) {
 407                        ctxp->rqb_buffer = NULL;
 408                        if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
 409                                ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
 410                                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 411                                nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
 412                                                                    nvmebuf);
 413                        } else {
 414                                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 415                                /* repost */
 416                                lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
 417                        }
 418                } else {
 419                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 420                }
 421        }
 422        ctxp->state = LPFC_NVME_STE_FREE;
 423
 424        spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
 425        if (phba->sli4_hba.nvmet_io_wait_cnt) {
 426                list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
 427                                 nvmebuf, struct rqb_dmabuf,
 428                                 hbuf.list);
 429                phba->sli4_hba.nvmet_io_wait_cnt--;
 430                spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
 431                                       iflag);
 432
 433                fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
 434                oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 435                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 436                size = nvmebuf->bytes_recv;
 437                sid = sli4_sid_from_fc_hdr(fc_hdr);
 438
 439                ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
 440                ctxp->wqeq = NULL;
 441                ctxp->offset = 0;
 442                ctxp->phba = phba;
 443                ctxp->size = size;
 444                ctxp->oxid = oxid;
 445                ctxp->sid = sid;
 446                ctxp->state = LPFC_NVME_STE_RCV;
 447                ctxp->entry_cnt = 1;
 448                ctxp->flag = 0;
 449                ctxp->ctxbuf = ctx_buf;
 450                ctxp->rqb_buffer = (void *)nvmebuf;
 451                spin_lock_init(&ctxp->ctxlock);
 452
 453#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  454                /* NOTE: isr time stamp is stale when context is re-assigned */
 455                if (ctxp->ts_isr_cmd) {
 456                        ctxp->ts_cmd_nvme = 0;
 457                        ctxp->ts_nvme_data = 0;
 458                        ctxp->ts_data_wqput = 0;
 459                        ctxp->ts_isr_data = 0;
 460                        ctxp->ts_data_nvme = 0;
 461                        ctxp->ts_nvme_status = 0;
 462                        ctxp->ts_status_wqput = 0;
 463                        ctxp->ts_isr_status = 0;
 464                        ctxp->ts_status_nvme = 0;
 465                }
 466#endif
 467                atomic_inc(&tgtp->rcv_fcp_cmd_in);
 468
 469                /* Indicate that a replacement buffer has been posted */
 470                spin_lock_irqsave(&ctxp->ctxlock, iflag);
 471                ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
 472                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 473
 474                if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
 475                        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
 476                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 477                                        "6181 Unable to queue deferred work "
 478                                        "for oxid x%x. "
 479                                        "FCP Drop IO [x%x x%x x%x]\n",
 480                                        ctxp->oxid,
 481                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
 482                                        atomic_read(&tgtp->rcv_fcp_cmd_out),
 483                                        atomic_read(&tgtp->xmt_fcp_release));
 484
 485                        spin_lock_irqsave(&ctxp->ctxlock, iflag);
 486                        lpfc_nvmet_defer_release(phba, ctxp);
 487                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 488                        lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
 489                }
 490                return;
 491        }
 492        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
 493
 494        /*
 495         * Use the CPU context list, from the MRQ the IO was received on
 496         * (ctxp->idx), to save context structure.
 497         */
 498        spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
 499        list_del_init(&ctxp->list);
 500        spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
 501        cpu = raw_smp_processor_id();
 502        infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
 503        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
 504        list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
 505        infop->nvmet_ctx_list_cnt++;
 506        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
 507#endif
 508}
 509
 510#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
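/**
 * lpfc_nvmet_ktime - Accumulate per-IO latency statistics (debugfs builds)
 * @phba: Pointer to HBA context object.
 * @ctxp: Exchange context carrying the timestamps taken along the IO path.
 *
 * Validates that the recorded timestamps are monotonically increasing,
 * converts them into the segment 1-10 deltas described in the comment
 * below, and folds each delta into the phba->ktime_seg* total/min/max
 * counters.
 **/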
 511static void
 512lpfc_nvmet_ktime(struct lpfc_hba *phba,
 513                 struct lpfc_async_xchg_ctx *ctxp)
 514{
 515        uint64_t seg1, seg2, seg3, seg4, seg5;
 516        uint64_t seg6, seg7, seg8, seg9, seg10;
 517        uint64_t segsum;
 518
 519        if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
 520            !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
 521            !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
 522            !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
 523            !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
 524                return;
 525
 526        if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
 527                return;
 528        if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
 529                return;
 530        if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
 531                return;
 532        if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
 533                return;
 534        if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
 535                return;
 536        if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
 537                return;
 538        if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
 539                return;
 540        if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
 541                return;
 542        if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
 543                return;
 544        if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
 545                return;
 546        /*
 547         * Segment 1 - Time from FCP command received by MSI-X ISR
 548         * to FCP command is passed to NVME Layer.
 549         * Segment 2 - Time from FCP command payload handed
 550         * off to NVME Layer to Driver receives a Command op
 551         * from NVME Layer.
 552         * Segment 3 - Time from Driver receives a Command op
 553         * from NVME Layer to Command is put on WQ.
 554         * Segment 4 - Time from Driver WQ put is done
 555         * to MSI-X ISR for Command cmpl.
 556         * Segment 5 - Time from MSI-X ISR for Command cmpl to
 557         * Command cmpl is passed to NVME Layer.
 558         * Segment 6 - Time from Command cmpl is passed to NVME
 559         * Layer to Driver receives a RSP op from NVME Layer.
 560         * Segment 7 - Time from Driver receives a RSP op from
 561         * NVME Layer to WQ put is done on TRSP FCP Status.
 562         * Segment 8 - Time from Driver WQ put is done on TRSP
 563         * FCP Status to MSI-X ISR for TRSP cmpl.
 564         * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
 565         * TRSP cmpl is passed to NVME Layer.
 566         * Segment 10 - Time from FCP command received by
 567         * MSI-X ISR to command is completed on wire.
 568         * (Segments 1 thru 8) for READDATA / WRITEDATA
 569         * (Segments 1 thru 4) for READDATA_RSP
 570         */
 571        seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
 572        segsum = seg1;
 573
 574        seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
 575        if (segsum > seg2)
 576                return;
 577        seg2 -= segsum;
 578        segsum += seg2;
 579
 580        seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
 581        if (segsum > seg3)
 582                return;
 583        seg3 -= segsum;
 584        segsum += seg3;
 585
 586        seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
 587        if (segsum > seg4)
 588                return;
 589        seg4 -= segsum;
 590        segsum += seg4;
 591
 592        seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
 593        if (segsum > seg5)
 594                return;
 595        seg5 -= segsum;
 596        segsum += seg5;
 597
 598
 599        /* For auto rsp commands seg6 thru seg10 will be 0 */
 600        if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
 601                seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
 602                if (segsum > seg6)
 603                        return;
 604                seg6 -= segsum;
 605                segsum += seg6;
 606
 607                seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
 608                if (segsum > seg7)
 609                        return;
 610                seg7 -= segsum;
 611                segsum += seg7;
 612
 613                seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
 614                if (segsum > seg8)
 615                        return;
 616                seg8 -= segsum;
 617                segsum += seg8;
 618
 619                seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
 620                if (segsum > seg9)
 621                        return;
 622                seg9 -= segsum;
 623                segsum += seg9;
 624
 625                if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
 626                        return;
 627                seg10 = (ctxp->ts_isr_status -
 628                        ctxp->ts_isr_cmd);
 629        } else {
 630                if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
 631                        return;
 632                seg6 =  0;
 633                seg7 =  0;
 634                seg8 =  0;
 635                seg9 =  0;
 636                seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
 637        }
 638
 639        phba->ktime_seg1_total += seg1;
 640        if (seg1 < phba->ktime_seg1_min)
 641                phba->ktime_seg1_min = seg1;
 642        else if (seg1 > phba->ktime_seg1_max)
 643                phba->ktime_seg1_max = seg1;
 644
 645        phba->ktime_seg2_total += seg2;
 646        if (seg2 < phba->ktime_seg2_min)
 647                phba->ktime_seg2_min = seg2;
 648        else if (seg2 > phba->ktime_seg2_max)
 649                phba->ktime_seg2_max = seg2;
 650
 651        phba->ktime_seg3_total += seg3;
 652        if (seg3 < phba->ktime_seg3_min)
 653                phba->ktime_seg3_min = seg3;
 654        else if (seg3 > phba->ktime_seg3_max)
 655                phba->ktime_seg3_max = seg3;
 656
 657        phba->ktime_seg4_total += seg4;
 658        if (seg4 < phba->ktime_seg4_min)
 659                phba->ktime_seg4_min = seg4;
 660        else if (seg4 > phba->ktime_seg4_max)
 661                phba->ktime_seg4_max = seg4;
 662
 663        phba->ktime_seg5_total += seg5;
 664        if (seg5 < phba->ktime_seg5_min)
 665                phba->ktime_seg5_min = seg5;
 666        else if (seg5 > phba->ktime_seg5_max)
 667                phba->ktime_seg5_max = seg5;
 668
 669        phba->ktime_data_samples++;
 670        if (!seg6)
 671                goto out;
 672
 673        phba->ktime_seg6_total += seg6;
 674        if (seg6 < phba->ktime_seg6_min)
 675                phba->ktime_seg6_min = seg6;
 676        else if (seg6 > phba->ktime_seg6_max)
 677                phba->ktime_seg6_max = seg6;
 678
 679        phba->ktime_seg7_total += seg7;
 680        if (seg7 < phba->ktime_seg7_min)
 681                phba->ktime_seg7_min = seg7;
 682        else if (seg7 > phba->ktime_seg7_max)
 683                phba->ktime_seg7_max = seg7;
 684
 685        phba->ktime_seg8_total += seg8;
 686        if (seg8 < phba->ktime_seg8_min)
 687                phba->ktime_seg8_min = seg8;
 688        else if (seg8 > phba->ktime_seg8_max)
 689                phba->ktime_seg8_max = seg8;
 690
 691        phba->ktime_seg9_total += seg9;
 692        if (seg9 < phba->ktime_seg9_min)
 693                phba->ktime_seg9_min = seg9;
 694        else if (seg9 > phba->ktime_seg9_max)
 695                phba->ktime_seg9_max = seg9;
 696out:
 697        phba->ktime_seg10_total += seg10;
 698        if (seg10 < phba->ktime_seg10_min)
 699                phba->ktime_seg10_min = seg10;
 700        else if (seg10 > phba->ktime_seg10_max)
 701                phba->ktime_seg10_max = seg10;
 702        phba->ktime_status_samples++;
 703}
 704#endif
 705
 706/**
 707 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 708 * @phba: Pointer to HBA context object.
 709 * @cmdwqe: Pointer to driver command WQE object.
 710 * @wcqe: Pointer to driver response CQE object.
 711 *
 712 * The function is called from SLI ring event handler with no
  713 * lock held. This function is the completion handler for NVME FCP commands.
 714 * The function frees memory resources used for the NVME commands.
 715 **/
 716static void
 717lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 718                          struct lpfc_wcqe_complete *wcqe)
 719{
 720        struct lpfc_nvmet_tgtport *tgtp;
 721        struct nvmefc_tgt_fcp_req *rsp;
 722        struct lpfc_async_xchg_ctx *ctxp;
 723        uint32_t status, result, op, start_clean, logerr;
 724#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 725        int id;
 726#endif
 727
 728        ctxp = cmdwqe->context2;
 729        ctxp->flag &= ~LPFC_NVME_IO_INP;
 730
 731        rsp = &ctxp->hdlrctx.fcp_req;
 732        op = rsp->op;
 733
 734        status = bf_get(lpfc_wcqe_c_status, wcqe);
 735        result = wcqe->parameter;
 736
 737        if (phba->targetport)
 738                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 739        else
 740                tgtp = NULL;
 741
 742        lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
 743                         ctxp->oxid, op, status);
 744
 745        if (status) {
 746                rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
 747                rsp->transferred_length = 0;
 748                if (tgtp) {
 749                        atomic_inc(&tgtp->xmt_fcp_rsp_error);
 750                        if (result == IOERR_ABORT_REQUESTED)
 751                                atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
 752                }
 753
 754                logerr = LOG_NVME_IOERR;
 755
  756                /* pick up SLI4 exchange busy condition */
 757                if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
 758                        ctxp->flag |= LPFC_NVME_XBUSY;
 759                        logerr |= LOG_NVME_ABTS;
 760                        if (tgtp)
 761                                atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
 762
 763                } else {
 764                        ctxp->flag &= ~LPFC_NVME_XBUSY;
 765                }
 766
 767                lpfc_printf_log(phba, KERN_INFO, logerr,
 768                                "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
 769                                "XBUSY:x%x\n",
 770                                ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
 771                                status, result, ctxp->flag);
 772
 773        } else {
 774                rsp->fcp_error = NVME_SC_SUCCESS;
 775                if (op == NVMET_FCOP_RSP)
 776                        rsp->transferred_length = rsp->rsplen;
 777                else
 778                        rsp->transferred_length = rsp->transfer_length;
 779                if (tgtp)
 780                        atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
 781        }
 782
 783        if ((op == NVMET_FCOP_READDATA_RSP) ||
 784            (op == NVMET_FCOP_RSP)) {
 785                /* Sanity check */
 786                ctxp->state = LPFC_NVME_STE_DONE;
 787                ctxp->entry_cnt++;
 788
 789#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 790                if (ctxp->ts_cmd_nvme) {
 791                        if (rsp->op == NVMET_FCOP_READDATA_RSP) {
 792                                ctxp->ts_isr_data =
 793                                        cmdwqe->isr_timestamp;
 794                                ctxp->ts_data_nvme =
 795                                        ktime_get_ns();
 796                                ctxp->ts_nvme_status =
 797                                        ctxp->ts_data_nvme;
 798                                ctxp->ts_status_wqput =
 799                                        ctxp->ts_data_nvme;
 800                                ctxp->ts_isr_status =
 801                                        ctxp->ts_data_nvme;
 802                                ctxp->ts_status_nvme =
 803                                        ctxp->ts_data_nvme;
 804                        } else {
 805                                ctxp->ts_isr_status =
 806                                        cmdwqe->isr_timestamp;
 807                                ctxp->ts_status_nvme =
 808                                        ktime_get_ns();
 809                        }
 810                }
 811#endif
 812                rsp->done(rsp);
 813#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 814                if (ctxp->ts_cmd_nvme)
 815                        lpfc_nvmet_ktime(phba, ctxp);
 816#endif
 817                /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
 818        } else {
 819                ctxp->entry_cnt++;
 820                start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
 821                memset(((char *)cmdwqe) + start_clean, 0,
 822                       (sizeof(struct lpfc_iocbq) - start_clean));
 823#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 824                if (ctxp->ts_cmd_nvme) {
 825                        ctxp->ts_isr_data = cmdwqe->isr_timestamp;
 826                        ctxp->ts_data_nvme = ktime_get_ns();
 827                }
 828#endif
 829                rsp->done(rsp);
 830        }
 831#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 832        if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
 833                id = raw_smp_processor_id();
 834                this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
 835                if (ctxp->cpu != id)
 836                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
 837                                        "6704 CPU Check cmdcmpl: "
 838                                        "cpu %d expect %d\n",
 839                                        id, ctxp->cpu);
 840        }
 841#endif
 842}
 843
 844/**
  845 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit an NVME LS
  846 *         rsp for a prior NVME LS request that was received.
 847 * @axchg: pointer to exchange context for the NVME LS request the response
 848 *         is for.
 849 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 850 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
 851 *
 852 * This routine is used to format and send a WQE to transmit a NVME LS
 853 * Response.  The response is for a prior NVME LS request that was
 854 * received and posted to the transport.
 855 *
 856 * Returns:
  857 *  0 : if the response was successfully transmitted
  858 *  non-zero : if the response failed to transmit, in the form -Exxx.
 859 **/
 860int
 861__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
 862                        struct nvmefc_ls_rsp *ls_rsp,
 863                        void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
 864                                struct lpfc_iocbq *cmdwqe,
 865                                struct lpfc_wcqe_complete *wcqe))
 866{
 867        struct lpfc_hba *phba = axchg->phba;
 868        struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
 869        struct lpfc_iocbq *nvmewqeq;
 870        struct lpfc_dmabuf dmabuf;
 871        struct ulp_bde64 bpl;
 872        int rc;
 873
 874        if (phba->pport->load_flag & FC_UNLOADING)
 875                return -ENODEV;
 876
 877        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
 878                        "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
 879
 880        if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
 881                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 882                                "6412 NVMEx LS rsp state mismatch "
 883                                "oxid x%x: %d %d\n",
 884                                axchg->oxid, axchg->state, axchg->entry_cnt);
 885                return -EALREADY;
 886        }
 887        axchg->state = LPFC_NVME_STE_LS_RSP;
 888        axchg->entry_cnt++;
 889
 890        nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
 891                                         ls_rsp->rsplen);
 892        if (nvmewqeq == NULL) {
 893                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 894                                "6150 NVMEx LS Drop Rsp x%x: Prep\n",
 895                                axchg->oxid);
 896                rc = -ENOMEM;
 897                goto out_free_buf;
 898        }
 899
 900        /* Save numBdes for bpl2sgl */
 901        nvmewqeq->rsvd2 = 1;
 902        nvmewqeq->hba_wqidx = 0;
 903        nvmewqeq->context3 = &dmabuf;
 904        dmabuf.virt = &bpl;
 905        bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
 906        bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
 907        bpl.tus.f.bdeSize = ls_rsp->rsplen;
 908        bpl.tus.f.bdeFlags = 0;
 909        bpl.tus.w = le32_to_cpu(bpl.tus.w);
 910        /*
 911         * Note: although we're using stack space for the dmabuf, the
 912         * call to lpfc_sli4_issue_wqe is synchronous, so it will not
 913         * be referenced after it returns back to this routine.
 914         */
 915
 916        nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
 917        nvmewqeq->iocb_cmpl = NULL;
 918        nvmewqeq->context2 = axchg;
 919
 920        lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
 921                         axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
 922
 923        rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
 924
 925        /* clear to be sure there's no reference */
 926        nvmewqeq->context3 = NULL;
 927
 928        if (rc == WQE_SUCCESS) {
 929                /*
 930                 * Okay to repost buffer here, but wait till cmpl
 931                 * before freeing ctxp and iocbq.
 932                 */
 933                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
 934                return 0;
 935        }
 936
 937        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 938                        "6151 NVMEx LS RSP x%x: failed to transmit %d\n",
 939                        axchg->oxid, rc);
 940
 941        rc = -ENXIO;
 942
 943        lpfc_nlp_put(nvmewqeq->context1);
 944
 945out_free_buf:
 946        /* Give back resources */
 947        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
 948
 949        /*
 950         * As transport doesn't track completions of responses, if the rsp
 951         * fails to send, the transport will effectively ignore the rsp
 952         * and consider the LS done. However, the driver has an active
 953         * exchange open for the LS - so be sure to abort the exchange
 954         * if the response isn't sent.
 955         */
 956        lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
 957        return rc;
 958}
 959
 960/**
 961 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
  962 * @tgtport: pointer to target port that NVME LS is to be transmitted from.
 963 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 964 *
 965 * Driver registers this routine to transmit responses for received NVME
 966 * LS requests.
 967 *
 968 * This routine is used to format and send a WQE to transmit a NVME LS
  969 * Response. The ls_rsp is used to reverse-map the LS to the original
  970 * NVME LS request sequence, which provides addressing information for
  971 * the remote port that the LS is to be sent to, as well as the exchange
  972 * id that the LS is bound to.
 973 *
 974 * Returns:
  975 *  0 : if the response was successfully transmitted
  976 *  non-zero : if the response failed to transmit, in the form -Exxx.
 977 **/
 978static int
 979lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
 980                      struct nvmefc_ls_rsp *ls_rsp)
 981{
 982        struct lpfc_async_xchg_ctx *axchg =
 983                container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
 984        struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
 985        int rc;
 986
 987        if (axchg->phba->pport->load_flag & FC_UNLOADING)
 988                return -ENODEV;
 989
 990        rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
 991
 992        if (rc) {
 993                atomic_inc(&nvmep->xmt_ls_drop);
 994                /*
 995                 * unless the failure is due to having already sent
 996                 * the response, an abort will be generated for the
 997                 * exchange if the rsp can't be sent.
 998                 */
 999                if (rc != -EALREADY)
1000                        atomic_inc(&nvmep->xmt_ls_abort);
1001                return rc;
1002        }
1003
1004        atomic_inc(&nvmep->xmt_ls_rsp);
1005        return 0;
1006}
1007
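/**
 * lpfc_nvmet_xmt_fcp_op - Issue a target FCP data or response operation
 * @tgtport: pointer to target port the IO is associated with.
 * @rsp: the transport FCP operation (TSEND, TRECEIVE or TRSP) to issue.
 *
 * Registered with the nvmet_fc transport as the fcp_op entry point.  The
 * routine builds the appropriate WQE for the operation and posts it to the
 * hardware queue.  If the WQ is full, the WQE is parked on the wqfull list
 * and re-issued when a WQE release CQE frees space.
 *
 * Returns 0 on success; otherwise a negative error code.
 **/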
1008static int
1009lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
1010                      struct nvmefc_tgt_fcp_req *rsp)
1011{
1012        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1013        struct lpfc_async_xchg_ctx *ctxp =
1014                container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1015        struct lpfc_hba *phba = ctxp->phba;
1016        struct lpfc_queue *wq;
1017        struct lpfc_iocbq *nvmewqeq;
1018        struct lpfc_sli_ring *pring;
1019        unsigned long iflags;
1020        int rc;
1021#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1022        int id;
1023#endif
1024
1025        if (phba->pport->load_flag & FC_UNLOADING) {
1026                rc = -ENODEV;
1027                goto aerr;
1028        }
1029
1030#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1031        if (ctxp->ts_cmd_nvme) {
1032                if (rsp->op == NVMET_FCOP_RSP)
1033                        ctxp->ts_nvme_status = ktime_get_ns();
1034                else
1035                        ctxp->ts_nvme_data = ktime_get_ns();
1036        }
1037
1038        /* Setup the hdw queue if not already set */
1039        if (!ctxp->hdwq)
1040                ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
1041
1042        if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
1043                id = raw_smp_processor_id();
1044                this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1045                if (rsp->hwqid != id)
1046                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1047                                        "6705 CPU Check OP: "
1048                                        "cpu %d expect %d\n",
1049                                        id, rsp->hwqid);
1050                ctxp->cpu = id; /* Setup cpu for cmpl check */
1051        }
1052#endif
1053
1054        /* Sanity check */
1055        if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
1056            (ctxp->state == LPFC_NVME_STE_ABORT)) {
1057                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1058                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1059                                "6102 IO oxid x%x aborted\n",
1060                                ctxp->oxid);
1061                rc = -ENXIO;
1062                goto aerr;
1063        }
1064
1065        nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
1066        if (nvmewqeq == NULL) {
1067                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1068                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1069                                "6152 FCP Drop IO x%x: Prep\n",
1070                                ctxp->oxid);
1071                rc = -ENXIO;
1072                goto aerr;
1073        }
1074
1075        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
1076        nvmewqeq->iocb_cmpl = NULL;
1077        nvmewqeq->context2 = ctxp;
1078        nvmewqeq->iocb_flag |=  LPFC_IO_NVMET;
1079        ctxp->wqeq->hba_wqidx = rsp->hwqid;
1080
1081        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
1082                         ctxp->oxid, rsp->op, rsp->rsplen);
1083
1084        ctxp->flag |= LPFC_NVME_IO_INP;
1085        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1086        if (rc == WQE_SUCCESS) {
1087#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1088                if (!ctxp->ts_cmd_nvme)
1089                        return 0;
1090                if (rsp->op == NVMET_FCOP_RSP)
1091                        ctxp->ts_status_wqput = ktime_get_ns();
1092                else
1093                        ctxp->ts_data_wqput = ktime_get_ns();
1094#endif
1095                return 0;
1096        }
1097
1098        if (rc == -EBUSY) {
1099                /*
1100                 * WQ was full, so queue nvmewqeq to be sent after
1101                 * WQE release CQE
1102                 */
1103                ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
1104                wq = ctxp->hdwq->io_wq;
1105                pring = wq->pring;
1106                spin_lock_irqsave(&pring->ring_lock, iflags);
1107                list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
1108                wq->q_flag |= HBA_NVMET_WQFULL;
1109                spin_unlock_irqrestore(&pring->ring_lock, iflags);
1110                atomic_inc(&lpfc_nvmep->defer_wqfull);
1111                return 0;
1112        }
1113
1114        /* Give back resources */
1115        atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1116        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1117                        "6153 FCP Drop IO x%x: Issue: %d\n",
1118                        ctxp->oxid, rc);
1119
1120        ctxp->wqeq->hba_wqidx = 0;
1121        nvmewqeq->context2 = NULL;
1122        nvmewqeq->context3 = NULL;
1123        rc = -EBUSY;
1124aerr:
1125        return rc;
1126}
1127
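/**
 * lpfc_nvmet_targetport_delete - Transport callback for targetport removal
 * @targetport: pointer to the target port being deleted.
 *
 * Completes tport_unreg_cmp to release any thread waiting for the
 * unregistration to finish.
 **/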
1128static void
1129lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1130{
1131        struct lpfc_nvmet_tgtport *tport = targetport->private;
1132
1133        /* release any threads waiting for the unreg to complete */
1134        if (tport->phba->targetport)
1135                complete(tport->tport_unreg_cmp);
1136}
1137
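/**
 * lpfc_nvmet_xmt_fcp_abort - Transport callback to abort a target FCP IO
 * @tgtport: pointer to target port the IO is associated with.
 * @req: the transport FCP request to abort.
 *
 * Marks the exchange with LPFC_NVME_ABORT_OP and issues an unsolicited or
 * solicited abort depending on whether any WQEs have been issued on the
 * exchange yet.  If the adapter is already aborting the exchange, or an
 * abort was previously issued, the request is a no-op.
 **/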
1138static void
1139lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1140                         struct nvmefc_tgt_fcp_req *req)
1141{
1142        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1143        struct lpfc_async_xchg_ctx *ctxp =
1144                container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1145        struct lpfc_hba *phba = ctxp->phba;
1146        struct lpfc_queue *wq;
1147        unsigned long flags;
1148
1149        if (phba->pport->load_flag & FC_UNLOADING)
1150                return;
1151
1152        if (!ctxp->hdwq)
1153                ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1154
1155        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1156                        "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
1157                        ctxp->oxid, ctxp->flag, ctxp->state);
1158
1159        lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1160                         ctxp->oxid, ctxp->flag, ctxp->state);
1161
1162        atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1163
1164        spin_lock_irqsave(&ctxp->ctxlock, flags);
1165
1166        /* Since iaab/iaar are NOT set, we need to check
 1167         * if the firmware is in the process of aborting this IO
1168         */
1169        if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
1170                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1171                return;
1172        }
1173        ctxp->flag |= LPFC_NVME_ABORT_OP;
1174
1175        if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
1176                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1177                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1178                                                 ctxp->oxid);
1179                wq = ctxp->hdwq->io_wq;
1180                lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1181                return;
1182        }
1183        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1184
1185        /* A state of LPFC_NVME_STE_RCV means we have just received
1186         * the NVME command and have not started processing it.
1187         * (by issuing any IO WQEs on this exchange yet)
1188         */
1189        if (ctxp->state == LPFC_NVME_STE_RCV)
1190                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1191                                                 ctxp->oxid);
1192        else
1193                lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1194                                               ctxp->oxid);
1195}
1196
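/**
 * lpfc_nvmet_xmt_fcp_release - Transport callback to release a target FCP IO
 * @tgtport: pointer to target port the IO is associated with.
 * @rsp: the transport FCP request being released.
 *
 * If an abort is outstanding or the exchange is still busy in the adapter,
 * the final release is deferred to the abort path; otherwise the context
 * buffer is recycled immediately via lpfc_nvmet_ctxbuf_post().
 **/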
1197static void
1198lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1199                           struct nvmefc_tgt_fcp_req *rsp)
1200{
1201        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1202        struct lpfc_async_xchg_ctx *ctxp =
1203                container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1204        struct lpfc_hba *phba = ctxp->phba;
1205        unsigned long flags;
1206        bool aborting = false;
1207
1208        spin_lock_irqsave(&ctxp->ctxlock, flags);
1209        if (ctxp->flag & LPFC_NVME_XBUSY)
1210                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1211                                "6027 NVMET release with XBUSY flag x%x"
1212                                " oxid x%x\n",
1213                                ctxp->flag, ctxp->oxid);
1214        else if (ctxp->state != LPFC_NVME_STE_DONE &&
1215                 ctxp->state != LPFC_NVME_STE_ABORT)
1216                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1217                                "6413 NVMET release bad state %d %d oxid x%x\n",
1218                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1219
1220        if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
1221            (ctxp->flag & LPFC_NVME_XBUSY)) {
1222                aborting = true;
1223                /* let the abort path do the real release */
1224                lpfc_nvmet_defer_release(phba, ctxp);
1225        }
1226        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1227
1228        lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1229                         ctxp->state, aborting);
1230
1231        atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1232        ctxp->flag &= ~LPFC_NVME_TNOTIFY;
1233
1234        if (aborting)
1235                return;
1236
1237        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1238}
1239
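/**
 * lpfc_nvmet_defer_rcv - Transport callback when a deferred command is accepted
 * @tgtport: pointer to target port the IO is associated with.
 * @rsp: the transport FCP request that had been deferred.
 *
 * Frees the original receive buffer, since a replacement buffer has already
 * been reposted to the RQ, and clears ctxp->rqb_buffer.
 **/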
1240static void
1241lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1242                     struct nvmefc_tgt_fcp_req *rsp)
1243{
1244        struct lpfc_nvmet_tgtport *tgtp;
1245        struct lpfc_async_xchg_ctx *ctxp =
1246                container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1247        struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1248        struct lpfc_hba *phba = ctxp->phba;
1249        unsigned long iflag;
1250
1251
1252        lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1253                         ctxp->oxid, ctxp->size, raw_smp_processor_id());
1254
1255        if (!nvmebuf) {
1256                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1257                                "6425 Defer rcv: no buffer oxid x%x: "
1258                                "flg %x ste %x\n",
1259                                ctxp->oxid, ctxp->flag, ctxp->state);
1260                return;
1261        }
1262
1263        tgtp = phba->targetport->private;
1264        if (tgtp)
1265                atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1266
1267        /* Free the nvmebuf since a new buffer already replaced it */
1268        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1269        spin_lock_irqsave(&ctxp->ctxlock, iflag);
1270        ctxp->rqb_buffer = NULL;
1271        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1272}
1273
1274/**
 1275 * lpfc_nvmet_ls_req_cmp - completion handler for an NVME LS request
1276 * @phba: Pointer to HBA context object
1277 * @cmdwqe: Pointer to driver command WQE object.
1278 * @wcqe: Pointer to driver response CQE object.
1279 *
1280 * This function is the completion handler for NVME LS requests.
1281 * The function updates any states and statistics, then calls the
1282 * generic completion handler to finish completion of the request.
1283 **/
1284static void
1285lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1286                       struct lpfc_wcqe_complete *wcqe)
1287{
1288        __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
1289}
1290
1291/**
 1292 * lpfc_nvmet_ls_req - Issue a Link Service request
1293 * @targetport: pointer to target instance registered with nvmet transport.
1294 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
1295 *               Driver sets this value to the ndlp pointer.
1296 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
1297 *
1298 * Driver registers this routine to handle any link service request
 1299 * from the nvmet_fc transport to a remote nvme-aware port.
1300 *
1301 * Return value :
1302 *   0 - Success
1303 *   non-zero: various error codes, in form of -Exxx
1304 **/
1305static int
1306lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
1307                  void *hosthandle,
1308                  struct nvmefc_ls_req *pnvme_lsreq)
1309{
1310        struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1311        struct lpfc_hba *phba;
1312        struct lpfc_nodelist *ndlp;
1313        int ret;
1314        u32 hstate;
1315
1316        if (!lpfc_nvmet)
1317                return -EINVAL;
1318
1319        phba = lpfc_nvmet->phba;
1320        if (phba->pport->load_flag & FC_UNLOADING)
1321                return -EINVAL;
1322
1323        hstate = atomic_read(&lpfc_nvmet->state);
1324        if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
1325                return -EACCES;
1326
1327        ndlp = (struct lpfc_nodelist *)hosthandle;
1328
1329        ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
1330                                 lpfc_nvmet_ls_req_cmp);
1331
1332        return ret;
1333}
1334
1335/**
1336 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
1337 * @targetport: Transport targetport, that LS was issued from.
1338 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
1339 *               Driver sets this value to the ndlp pointer.
1340 * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
1341 *
1342 * Driver registers this routine to abort an NVME LS request that is
1343 * in progress (from the transport's perspective).
1344 **/
1345static void
1346lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
1347                    void *hosthandle,
1348                    struct nvmefc_ls_req *pnvme_lsreq)
1349{
1350        struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1351        struct lpfc_hba *phba;
1352        struct lpfc_nodelist *ndlp;
1353        int ret;
1354
1355        phba = lpfc_nvmet->phba;
1356        if (phba->pport->load_flag & FC_UNLOADING)
1357                return;
1358
1359        ndlp = (struct lpfc_nodelist *)hosthandle;
1360
1361        ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
1362        if (!ret)
1363                atomic_inc(&lpfc_nvmet->xmt_ls_abort);
1364}
1365
1366static void
1367lpfc_nvmet_host_release(void *hosthandle)
1368{
1369        struct lpfc_nodelist *ndlp = hosthandle;
1370        struct lpfc_hba *phba = ndlp->phba;
1371        struct lpfc_nvmet_tgtport *tgtp;
1372
1373        if (!phba->targetport || !phba->targetport->private)
1374                return;
1375
1376        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1377                        "6202 NVMET XPT releasing hosthandle x%px "
1378                        "DID x%x xflags x%x refcnt %d\n",
1379                        hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
1380                        kref_read(&ndlp->kref));
1381        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1382        spin_lock_irq(&ndlp->lock);
1383        ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
1384        spin_unlock_irq(&ndlp->lock);
1385        lpfc_nlp_put(ndlp);
1386        atomic_set(&tgtp->state, 0);
1387}
1388
1389static void
1390lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
1391{
1392        struct lpfc_nvmet_tgtport *tgtp;
1393        struct lpfc_hba *phba;
1394        uint32_t rc;
1395
1396        tgtp = tgtport->private;
1397        phba = tgtp->phba;
1398
1399        rc = lpfc_issue_els_rscn(phba->pport, 0);
1400        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1401                        "6420 NVMET subsystem change: Notification %s\n",
1402                        (rc) ? "Failed" : "Sent");
1403}
1404
1405static struct nvmet_fc_target_template lpfc_tgttemplate = {
1406        .targetport_delete = lpfc_nvmet_targetport_delete,
1407        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
1408        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
1409        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
1410        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1411        .defer_rcv      = lpfc_nvmet_defer_rcv,
1412        .discovery_event = lpfc_nvmet_discovery_event,
1413        .ls_req         = lpfc_nvmet_ls_req,
1414        .ls_abort       = lpfc_nvmet_ls_abort,
1415        .host_release   = lpfc_nvmet_host_release,
1416
1417        .max_hw_queues  = 1,
1418        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1419        .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1420        .dma_boundary = 0xFFFFFFFF,
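        /* Note: max_hw_queues, max_sgl_segments and target_features above are
         * only placeholder defaults; lpfc_nvmet_create_targetport() overwrites
         * them (from cfg_hdw_queue and cfg_nvme_seg_cnt) before registering
         * the targetport with the nvmet transport.
         */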
1421
1422        /* optional features */
1423        .target_features = 0,
1424        /* sizes of additional private data for data structures */
1425        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1426        .lsrqst_priv_sz = 0,
1427};
1428
1429static void
1430__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1431                struct lpfc_nvmet_ctx_info *infop)
1432{
1433        struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1434        unsigned long flags;
1435
1436        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1437        list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1438                                &infop->nvmet_ctx_list, list) {
1439                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1440                list_del_init(&ctx_buf->list);
1441                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1442
1443                spin_lock(&phba->hbalock);
1444                __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1445                spin_unlock(&phba->hbalock);
1446
1447                ctx_buf->sglq->state = SGL_FREED;
1448                ctx_buf->sglq->ndlp = NULL;
1449
1450                spin_lock(&phba->sli4_hba.sgl_list_lock);
1451                list_add_tail(&ctx_buf->sglq->list,
1452                                &phba->sli4_hba.lpfc_nvmet_sgl_list);
1453                spin_unlock(&phba->sli4_hba.sgl_list_lock);
1454
1455                lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1456                kfree(ctx_buf->context);
1457        }
1458        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1459}
1460
1461static void
1462lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1463{
1464        struct lpfc_nvmet_ctx_info *infop;
1465        int i, j;
1466
1467        /* The first context list, MRQ 0 CPU 0 */
1468        infop = phba->sli4_hba.nvmet_ctx_info;
1469        if (!infop)
1470                return;
1471
1472        /* Cycle the entire CPU context list for every MRQ */
1473        for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1474                for_each_present_cpu(j) {
1475                        infop = lpfc_get_ctx_list(phba, j, i);
1476                        __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1477                }
1478        }
1479        kfree(phba->sli4_hba.nvmet_ctx_info);
1480        phba->sli4_hba.nvmet_ctx_info = NULL;
1481}
1482
1483static int
1484lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1485{
1486        struct lpfc_nvmet_ctxbuf *ctx_buf;
1487        struct lpfc_iocbq *nvmewqe;
1488        union lpfc_wqe128 *wqe;
1489        struct lpfc_nvmet_ctx_info *last_infop;
1490        struct lpfc_nvmet_ctx_info *infop;
1491        int i, j, idx, cpu;
1492
1493        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1494                        "6403 Allocate NVMET resources for %d XRIs\n",
1495                        phba->sli4_hba.nvmet_xri_cnt);
1496
1497        phba->sli4_hba.nvmet_ctx_info = kcalloc(
1498                phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
1499                sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1500        if (!phba->sli4_hba.nvmet_ctx_info) {
1501                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1502                                "6419 Failed allocate memory for "
1503                                "nvmet context lists\n");
1504                return -ENOMEM;
1505        }
1506
1507        /*
1508         * Assuming X CPUs in the system, and Y MRQs, allocate some
1509         * lpfc_nvmet_ctx_info structures as follows:
1510         *
1511         * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1512         * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1513         * ...
1514         * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1515         *
1516         * Each line represents an MRQ "silo" containing an entry for
1517         * every CPU.
1518         *
1519         * MRQ X is initially assumed to be associated with CPU X, thus
1520         * contexts are initially distributed across all MRQs using
1521         * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
1522         * freed, they are freed to the MRQ silo based on the CPU number
1523         * of the IO completion. Thus a context that was allocated for MRQ A
1524         * whose IO completed on CPU B will be freed to cpuB/mrqA.
1525         */
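        /* Illustrative assumption (lpfc_get_ctx_list() is defined elsewhere):
         * it is expected to index the flat array allocated above roughly as
         * &nvmet_ctx_info[cpu * phba->cfg_nvmet_mrq + mrq], i.e. one
         * lpfc_nvmet_ctx_info slot per (CPU, MRQ) pair.
         */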
1526        for_each_possible_cpu(i) {
1527                for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1528                        infop = lpfc_get_ctx_list(phba, i, j);
1529                        INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1530                        spin_lock_init(&infop->nvmet_ctx_list_lock);
1531                        infop->nvmet_ctx_list_cnt = 0;
1532                }
1533        }
1534
1535        /*
1536         * Setup the next CPU context info ptr for each MRQ.
1537         * MRQ 0 will cycle thru CPUs 0 - X separately from
1538         * MRQ 1 cycling thru CPUs 0 - X, and so on.
1539         */
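        /* Walking the CPUs in reverse while seeding last_infop with the first
         * present CPU links each entry's nvmet_ctx_next_cpu to the next CPU's
         * list and wraps the last CPU back to the first, forming a per-MRQ
         * ring that lpfc_nvmet_replenish_context() can traverse.
         */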
1540        for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1541                last_infop = lpfc_get_ctx_list(phba,
1542                                               cpumask_first(cpu_present_mask),
1543                                               j);
1544                for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
1545                        infop = lpfc_get_ctx_list(phba, i, j);
1546                        infop->nvmet_ctx_next_cpu = last_infop;
1547                        last_infop = infop;
1548                }
1549        }
1550
1551        /* For all nvmet xris, allocate resources needed to process a
1552         * received command on a per xri basis.
1553         */
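        /* Per XRI this allocates: a ctx_buf wrapper, an async exchange
         * context, an iocbq/WQE, and an NVMET SGLQ, and it initializes a
         * defer_work item for deferred command delivery.
         */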
1554        idx = 0;
1555        cpu = cpumask_first(cpu_present_mask);
1556        for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1557                ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1558                if (!ctx_buf) {
1559                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1560                                        "6404 Ran out of memory for NVMET\n");
1561                        return -ENOMEM;
1562                }
1563
1564                ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1565                                           GFP_KERNEL);
1566                if (!ctx_buf->context) {
1567                        kfree(ctx_buf);
1568                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1569                                        "6405 Ran out of NVMET "
1570                                        "context memory\n");
1571                        return -ENOMEM;
1572                }
1573                ctx_buf->context->ctxbuf = ctx_buf;
1574                ctx_buf->context->state = LPFC_NVME_STE_FREE;
1575
1576                ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1577                if (!ctx_buf->iocbq) {
1578                        kfree(ctx_buf->context);
1579                        kfree(ctx_buf);
1580                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1581                                        "6406 Ran out of NVMET iocb/WQEs\n");
1582                        return -ENOMEM;
1583                }
1584                ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1585                nvmewqe = ctx_buf->iocbq;
1586                wqe = &nvmewqe->wqe;
1587
1588                /* Initialize WQE */
1589                memset(wqe, 0, sizeof(union lpfc_wqe));
1590
1591                ctx_buf->iocbq->context1 = NULL;
1592                spin_lock(&phba->sli4_hba.sgl_list_lock);
1593                ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1594                spin_unlock(&phba->sli4_hba.sgl_list_lock);
1595                if (!ctx_buf->sglq) {
1596                        lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1597                        kfree(ctx_buf->context);
1598                        kfree(ctx_buf);
1599                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1600                                        "6407 Ran out of NVMET XRIs\n");
1601                        return -ENOMEM;
1602                }
1603                INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1604
1605                /*
1606                 * Add ctx to MRQidx context list. Our initial assumption
1607                 * is MRQidx will be associated with CPUidx. This association
1608                 * can change on the fly.
1609                 */
1610                infop = lpfc_get_ctx_list(phba, cpu, idx);
1611                spin_lock(&infop->nvmet_ctx_list_lock);
1612                list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1613                infop->nvmet_ctx_list_cnt++;
1614                spin_unlock(&infop->nvmet_ctx_list_lock);
1615
1616                /* Spread ctx structures evenly across all MRQs */
1617                idx++;
1618                if (idx >= phba->cfg_nvmet_mrq) {
1619                        idx = 0;
1620                        cpu = cpumask_first(cpu_present_mask);
1621                        continue;
1622                }
1623                cpu = cpumask_next(cpu, cpu_present_mask);
1624                if (cpu == nr_cpu_ids)
1625                        cpu = cpumask_first(cpu_present_mask);
1626
1627        }
1628
1629        for_each_present_cpu(i) {
1630                for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1631                        infop = lpfc_get_ctx_list(phba, i, j);
1632                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1633                                        "6408 TOTAL NVMET ctx for CPU %d "
1634                                        "MRQ %d: cnt %d nextcpu x%px\n",
1635                                        i, j, infop->nvmet_ctx_list_cnt,
1636                                        infop->nvmet_ctx_next_cpu);
1637                }
1638        }
1639        return 0;
1640}
1641
1642int
1643lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1644{
1645        struct lpfc_vport  *vport = phba->pport;
1646        struct lpfc_nvmet_tgtport *tgtp;
1647        struct nvmet_fc_port_info pinfo;
1648        int error;
1649
1650        if (phba->targetport)
1651                return 0;
1652
1653        error = lpfc_nvmet_setup_io_context(phba);
1654        if (error)
1655                return error;
1656
1657        memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1658        pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1659        pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1660        pinfo.port_id = vport->fc_myDID;
1661
1662        /* Tell the transport layer cfg_nvme_seg_cnt + 1 because it takes
1663         * page alignment into account. When space for the SGL is allocated,
1664         * we allocate + 3: one for cmd, one for rsp, and one for alignment.
1665         */
1666        lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1667        lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1668        lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1669
1670#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1671        error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1672                                             &phba->pcidev->dev,
1673                                             &phba->targetport);
1674#else
1675        error = -ENOENT;
1676#endif
1677        if (error) {
1678                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1679                                "6025 Cannot register NVME targetport x%x: "
1680                                "portnm %llx nodenm %llx segs %d qs %d\n",
1681                                error,
1682                                pinfo.port_name, pinfo.node_name,
1683                                lpfc_tgttemplate.max_sgl_segments,
1684                                lpfc_tgttemplate.max_hw_queues);
1685                phba->targetport = NULL;
1686                phba->nvmet_support = 0;
1687
1688                lpfc_nvmet_cleanup_io_context(phba);
1689
1690        } else {
1691                tgtp = (struct lpfc_nvmet_tgtport *)
1692                        phba->targetport->private;
1693                tgtp->phba = phba;
1694
1695                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1696                                "6026 Registered NVME "
1697                                "targetport: x%px, private x%px "
1698                                "portnm %llx nodenm %llx segs %d qs %d\n",
1699                                phba->targetport, tgtp,
1700                                pinfo.port_name, pinfo.node_name,
1701                                lpfc_tgttemplate.max_sgl_segments,
1702                                lpfc_tgttemplate.max_hw_queues);
1703
1704                atomic_set(&tgtp->rcv_ls_req_in, 0);
1705                atomic_set(&tgtp->rcv_ls_req_out, 0);
1706                atomic_set(&tgtp->rcv_ls_req_drop, 0);
1707                atomic_set(&tgtp->xmt_ls_abort, 0);
1708                atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1709                atomic_set(&tgtp->xmt_ls_rsp, 0);
1710                atomic_set(&tgtp->xmt_ls_drop, 0);
1711                atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1712                atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1713                atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1714                atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1715                atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1716                atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1717                atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1718                atomic_set(&tgtp->xmt_fcp_drop, 0);
1719                atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1720                atomic_set(&tgtp->xmt_fcp_read, 0);
1721                atomic_set(&tgtp->xmt_fcp_write, 0);
1722                atomic_set(&tgtp->xmt_fcp_rsp, 0);
1723                atomic_set(&tgtp->xmt_fcp_release, 0);
1724                atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1725                atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1726                atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1727                atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1728                atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1729                atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1730                atomic_set(&tgtp->xmt_fcp_abort, 0);
1731                atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1732                atomic_set(&tgtp->xmt_abort_unsol, 0);
1733                atomic_set(&tgtp->xmt_abort_sol, 0);
1734                atomic_set(&tgtp->xmt_abort_rsp, 0);
1735                atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1736                atomic_set(&tgtp->defer_ctx, 0);
1737                atomic_set(&tgtp->defer_fod, 0);
1738                atomic_set(&tgtp->defer_wqfull, 0);
1739        }
1740        return error;
1741}
1742
1743int
1744lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1745{
1746        struct lpfc_vport  *vport = phba->pport;
1747
1748        if (!phba->targetport)
1749                return 0;
1750
1751        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1752                         "6007 Update NVMET port x%px did x%x\n",
1753                         phba->targetport, vport->fc_myDID);
1754
1755        phba->targetport->port_id = vport->fc_myDID;
1756        return 0;
1757}
1758
1759/**
1760 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1761 * @phba: pointer to lpfc hba data structure.
1762 * @axri: pointer to the nvmet xri abort wcqe structure.
1763 *
1764 * This routine is invoked by the worker thread to process a SLI4 fast-path
1765 * NVMET aborted xri.
1766 **/
1767void
1768lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1769                            struct sli4_wcqe_xri_aborted *axri)
1770{
1771#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1772        uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1773        uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1774        struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1775        struct lpfc_nvmet_tgtport *tgtp;
1776        struct nvmefc_tgt_fcp_req *req = NULL;
1777        struct lpfc_nodelist *ndlp;
1778        unsigned long iflag = 0;
1779        int rrq_empty = 0;
1780        bool released = false;
1781
1782        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1783                        "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1784
1785        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1786                return;
1787
1788        if (phba->targetport) {
1789                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1790                atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1791        }
1792
1793        spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1794        list_for_each_entry_safe(ctxp, next_ctxp,
1795                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1796                                 list) {
1797                if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1798                        continue;
1799
1800                spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1801                                       iflag);
1802
1803                spin_lock_irqsave(&ctxp->ctxlock, iflag);
1804                /* Check if we already received a free context call
1805                 * and we have completed processing an abort situation.
1806                 */
1807                if (ctxp->flag & LPFC_NVME_CTX_RLS &&
1808                    !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
1809                        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1810                        list_del_init(&ctxp->list);
1811                        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1812                        released = true;
1813                }
1814                ctxp->flag &= ~LPFC_NVME_XBUSY;
1815                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1816
1817                rrq_empty = list_empty(&phba->active_rrq_list);
1818                ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1819                if (ndlp &&
1820                    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1821                     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1822                        lpfc_set_rrq_active(phba, ndlp,
1823                                ctxp->ctxbuf->sglq->sli4_lxritag,
1824                                rxid, 1);
1825                        lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1826                }
1827
1828                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1829                                "6318 XB aborted oxid x%x flg x%x (%x)\n",
1830                                ctxp->oxid, ctxp->flag, released);
1831                if (released)
1832                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1833
1834                if (rrq_empty)
1835                        lpfc_worker_wake_up(phba);
1836                return;
1837        }
1838        spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1839        ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1840        if (ctxp) {
1841                /*
1842                 *  Abort already done by FW, so BA_ACC sent.
1843                 *  However, the transport may be unaware.
1844                 */
1845                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1846                                "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1847                                "flag x%x oxid x%x rxid x%x\n",
1848                                xri, ctxp->state, ctxp->flag, ctxp->oxid,
1849                                rxid);
1850
1851                spin_lock_irqsave(&ctxp->ctxlock, iflag);
1852                ctxp->flag |= LPFC_NVME_ABTS_RCV;
1853                ctxp->state = LPFC_NVME_STE_ABORT;
1854                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1855
1856                lpfc_nvmeio_data(phba,
1857                                 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1858                                 xri, raw_smp_processor_id(), 0);
1859
1860                req = &ctxp->hdlrctx.fcp_req;
1861                if (req)
1862                        nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1863        }
1864#endif
1865}
1866
1867int
1868lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1869                           struct fc_frame_header *fc_hdr)
1870{
1871#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1872        struct lpfc_hba *phba = vport->phba;
1873        struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1874        struct nvmefc_tgt_fcp_req *rsp;
1875        uint32_t sid;
1876        uint16_t oxid, xri;
1877        unsigned long iflag = 0;
1878
1879        sid = sli4_sid_from_fc_hdr(fc_hdr);
1880        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1881
1882        spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1883        list_for_each_entry_safe(ctxp, next_ctxp,
1884                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1885                                 list) {
1886                if (ctxp->oxid != oxid || ctxp->sid != sid)
1887                        continue;
1888
1889                xri = ctxp->ctxbuf->sglq->sli4_xritag;
1890
1891                spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1892                                       iflag);
1893                spin_lock_irqsave(&ctxp->ctxlock, iflag);
1894                ctxp->flag |= LPFC_NVME_ABTS_RCV;
1895                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1896
1897                lpfc_nvmeio_data(phba,
1898                        "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1899                        xri, raw_smp_processor_id(), 0);
1900
1901                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1902                                "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1903
1904                rsp = &ctxp->hdlrctx.fcp_req;
1905                nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1906
1907                /* Respond with BA_ACC accordingly */
1908                lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1909                return 0;
1910        }
1911        spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1912        /* check the wait list */
1913        if (phba->sli4_hba.nvmet_io_wait_cnt) {
1914                struct rqb_dmabuf *nvmebuf;
1915                struct fc_frame_header *fc_hdr_tmp;
1916                u32 sid_tmp;
1917                u16 oxid_tmp;
1918                bool found = false;
1919
1920                spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1921
1922                /* match by oxid and s_id */
1923                list_for_each_entry(nvmebuf,
1924                                    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1925                                    hbuf.list) {
1926                        fc_hdr_tmp = (struct fc_frame_header *)
1927                                        (nvmebuf->hbuf.virt);
1928                        oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1929                        sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1930                        if (oxid_tmp != oxid || sid_tmp != sid)
1931                                continue;
1932
1933                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1934                                        "6321 NVMET Rcv ABTS oxid x%x from x%x "
1935                                        "is waiting for a ctxp\n",
1936                                        oxid, sid);
1937
1938                        list_del_init(&nvmebuf->hbuf.list);
1939                        phba->sli4_hba.nvmet_io_wait_cnt--;
1940                        found = true;
1941                        break;
1942                }
1943                spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1944                                       iflag);
1945
1946                /* free buffer since already posted a new DMA buffer to RQ */
1947                if (found) {
1948                        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1949                        /* Respond with BA_ACC accordingly */
1950                        lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1951                        return 0;
1952                }
1953        }
1954
1955        /* check active list */
1956        ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1957        if (ctxp) {
1958                xri = ctxp->ctxbuf->sglq->sli4_xritag;
1959
1960                spin_lock_irqsave(&ctxp->ctxlock, iflag);
1961                ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
1962                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1963
1964                lpfc_nvmeio_data(phba,
1965                                 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1966                                 xri, raw_smp_processor_id(), 0);
1967
1968                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1969                                "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1970                                "flag x%x state x%x\n",
1971                                ctxp->oxid, xri, ctxp->flag, ctxp->state);
1972
1973                if (ctxp->flag & LPFC_NVME_TNOTIFY) {
1974                        /* Notify the transport */
1975                        nvmet_fc_rcv_fcp_abort(phba->targetport,
1976                                               &ctxp->hdlrctx.fcp_req);
1977                } else {
1978                        cancel_work_sync(&ctxp->ctxbuf->defer_work);
1979                        spin_lock_irqsave(&ctxp->ctxlock, iflag);
1980                        lpfc_nvmet_defer_release(phba, ctxp);
1981                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1982                }
1983                lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1984                                               ctxp->oxid);
1985
1986                lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1987                return 0;
1988        }
1989
1990        lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1991                         oxid, raw_smp_processor_id(), 1);
1992
1993        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1994                        "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1995
1996        /* Respond with BA_RJT accordingly */
1997        lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1998#endif
1999        return 0;
2000}
2001
2002static void
2003lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
2004                        struct lpfc_async_xchg_ctx *ctxp)
2005{
2006        struct lpfc_sli_ring *pring;
2007        struct lpfc_iocbq *nvmewqeq;
2008        struct lpfc_iocbq *next_nvmewqeq;
2009        unsigned long iflags;
2010        struct lpfc_wcqe_complete wcqe;
2011        struct lpfc_wcqe_complete *wcqep;
2012
2013        pring = wq->pring;
2014        wcqep = &wcqe;
2015
2016        /* Fake an ABORT error code back to cmpl routine */
2017        memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
2018        bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
2019        wcqep->parameter = IOERR_ABORT_REQUESTED;
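        /* Each WQE flushed below is completed through
         * lpfc_nvmet_xmt_fcp_op_cmp() with this faked status, so the
         * completion path treats the I/O as aborted rather than successful.
         */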
2020
2021        spin_lock_irqsave(&pring->ring_lock, iflags);
2022        list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
2023                                 &wq->wqfull_list, list) {
2024                if (ctxp) {
2025                        /* Checking for a specific IO to flush */
2026                        if (nvmewqeq->context2 == ctxp) {
2027                                list_del(&nvmewqeq->list);
2028                                spin_unlock_irqrestore(&pring->ring_lock,
2029                                                       iflags);
2030                                lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
2031                                                          wcqep);
2032                                return;
2033                        }
2034                        continue;
2035                } else {
2036                        /* Flush all IOs */
2037                        list_del(&nvmewqeq->list);
2038                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
2039                        lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
2040                        spin_lock_irqsave(&pring->ring_lock, iflags);
2041                }
2042        }
2043        if (!ctxp)
2044                wq->q_flag &= ~HBA_NVMET_WQFULL;
2045        spin_unlock_irqrestore(&pring->ring_lock, iflags);
2046}
2047
2048void
2049lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
2050                          struct lpfc_queue *wq)
2051{
2052#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2053        struct lpfc_sli_ring *pring;
2054        struct lpfc_iocbq *nvmewqeq;
2055        struct lpfc_async_xchg_ctx *ctxp;
2056        unsigned long iflags;
2057        int rc;
2058
2059        /*
2060         * Some WQE slots are available, so try to re-issue anything
2061         * on the WQ wqfull_list.
2062         */
2063        pring = wq->pring;
2064        spin_lock_irqsave(&pring->ring_lock, iflags);
2065        while (!list_empty(&wq->wqfull_list)) {
2066                list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
2067                                 list);
2068                spin_unlock_irqrestore(&pring->ring_lock, iflags);
2069                ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
2070                rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
2071                spin_lock_irqsave(&pring->ring_lock, iflags);
2072                if (rc == -EBUSY) {
2073                        /* WQ was full again, so put it back on the list */
2074                        list_add(&nvmewqeq->list, &wq->wqfull_list);
2075                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
2076                        return;
2077                }
2078                if (rc == WQE_SUCCESS) {
2079#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2080                        if (ctxp->ts_cmd_nvme) {
2081                                if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2082                                        ctxp->ts_status_wqput = ktime_get_ns();
2083                                else
2084                                        ctxp->ts_data_wqput = ktime_get_ns();
2085                        }
2086#endif
2087                } else {
2088                        WARN_ON(rc);
2089                }
2090        }
2091        wq->q_flag &= ~HBA_NVMET_WQFULL;
2092        spin_unlock_irqrestore(&pring->ring_lock, iflags);
2093
2094#endif
2095}
2096
2097void
2098lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
2099{
2100#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2101        struct lpfc_nvmet_tgtport *tgtp;
2102        struct lpfc_queue *wq;
2103        uint32_t qidx;
2104        DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
2105
2106        if (phba->nvmet_support == 0)
2107                return;
2108        if (phba->targetport) {
2109                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2110                for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
2111                        wq = phba->sli4_hba.hdwq[qidx].io_wq;
2112                        lpfc_nvmet_wqfull_flush(phba, wq, NULL);
2113                }
2114                tgtp->tport_unreg_cmp = &tport_unreg_cmp;
2115                nvmet_fc_unregister_targetport(phba->targetport);
2116                if (!wait_for_completion_timeout(&tport_unreg_cmp,
2117                                        msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2118                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2119                                        "6179 Unreg targetport x%px timeout "
2120                                        "reached.\n", phba->targetport);
2121                lpfc_nvmet_cleanup_io_context(phba);
2122        }
2123        phba->targetport = NULL;
2124#endif
2125}
2126
2127/**
2128 * lpfc_nvmet_handle_lsreq - Process an NVME LS request
2129 * @phba: pointer to lpfc hba data structure.
2130 * @axchg: pointer to exchange context for the NVME LS request
2131 *
2132 * This routine is used for processing an asynchronously received NVME LS
2133 * request. Any remaining validation is done and the LS is then forwarded
2134 * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
2135 *
2136 * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
2137 * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
2138 * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
2139 *
2140 * Returns 0 if LS was handled and delivered to the transport
2141 * Returns 1 if LS failed to be handled and should be dropped
2142 */
2143int
2144lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
2145                        struct lpfc_async_xchg_ctx *axchg)
2146{
2147#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2148        struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
2149        uint32_t *payload = axchg->payload;
2150        int rc;
2151
2152        atomic_inc(&tgtp->rcv_ls_req_in);
2153
2154        /*
2155         * Driver passes the ndlp as the hosthandle argument allowing
2156         * the transport to generate LS requests for any associations
2157         * that are created.
2158         */
2159        rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
2160                                 axchg->payload, axchg->size);
2161
2162        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2163                        "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2164                        "%08x %08x %08x\n", axchg->size, rc,
2165                        *payload, *(payload+1), *(payload+2),
2166                        *(payload+3), *(payload+4), *(payload+5));
2167
2168        if (!rc) {
2169                atomic_inc(&tgtp->rcv_ls_req_out);
2170                return 0;
2171        }
2172
2173        atomic_inc(&tgtp->rcv_ls_req_drop);
2174#endif
2175        return 1;
2176}
2177
2178static void
2179lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2180{
2181#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2182        struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
2183        struct lpfc_hba *phba = ctxp->phba;
2184        struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2185        struct lpfc_nvmet_tgtport *tgtp;
2186        uint32_t *payload, qno;
2187        uint32_t rc;
2188        unsigned long iflags;
2189
2190        if (!nvmebuf) {
2191                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2192                        "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2193                        "oxid: x%x flg: x%x state: x%x\n",
2194                        ctxp->oxid, ctxp->flag, ctxp->state);
2195                spin_lock_irqsave(&ctxp->ctxlock, iflags);
2196                lpfc_nvmet_defer_release(phba, ctxp);
2197                spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2198                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2199                                                 ctxp->oxid);
2200                return;
2201        }
2202
2203        if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
2204                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2205                                "6324 IO oxid x%x aborted\n",
2206                                ctxp->oxid);
2207                return;
2208        }
2209
2210        payload = (uint32_t *)(nvmebuf->dbuf.virt);
2211        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2212        ctxp->flag |= LPFC_NVME_TNOTIFY;
2213#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2214        if (ctxp->ts_isr_cmd)
2215                ctxp->ts_cmd_nvme = ktime_get_ns();
2216#endif
2217        /*
2218         * The calling sequence should be:
2219         * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2220         * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2221         * When we return from nvmet_fc_rcv_fcp_req, all relevant info
2222         * about the NVME command / FC header has been stored.
2223         * A buffer has already been reposted for this IO, so just free
2224         * the nvmebuf.
2225         */
2226        rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2227                                  payload, ctxp->size);
2228        /* Process FCP command */
2229        if (rc == 0) {
2230                atomic_inc(&tgtp->rcv_fcp_cmd_out);
2231                spin_lock_irqsave(&ctxp->ctxlock, iflags);
2232                if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
2233                    (nvmebuf != ctxp->rqb_buffer)) {
2234                        spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2235                        return;
2236                }
2237                ctxp->rqb_buffer = NULL;
2238                spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2239                lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2240                return;
2241        }
2242
2243        /* Processing of FCP command is deferred */
2244        if (rc == -EOVERFLOW) {
2245                lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2246                                 "from %06x\n",
2247                                 ctxp->oxid, ctxp->size, ctxp->sid);
2248                atomic_inc(&tgtp->rcv_fcp_cmd_out);
2249                atomic_inc(&tgtp->defer_fod);
2250                spin_lock_irqsave(&ctxp->ctxlock, iflags);
2251                if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
2252                        spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2253                        return;
2254                }
2255                spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2256                /*
2257                 * Post a replacement DMA buffer to RQ and defer
2258                 * freeing rcv buffer till .defer_rcv callback
2259                 */
2260                qno = nvmebuf->idx;
2261                lpfc_post_rq_buffer(
2262                        phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2263                        phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2264                return;
2265        }
2266        ctxp->flag &= ~LPFC_NVME_TNOTIFY;
2267        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2268        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2269                        "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2270                        ctxp->oxid, rc,
2271                        atomic_read(&tgtp->rcv_fcp_cmd_in),
2272                        atomic_read(&tgtp->rcv_fcp_cmd_out),
2273                        atomic_read(&tgtp->xmt_fcp_release));
2274        lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2275                         ctxp->oxid, ctxp->size, ctxp->sid);
2276        spin_lock_irqsave(&ctxp->ctxlock, iflags);
2277        lpfc_nvmet_defer_release(phba, ctxp);
2278        spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2279        lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2280#endif
2281}
2282
2283static void
2284lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2285{
2286#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2287        struct lpfc_nvmet_ctxbuf *ctx_buf =
2288                container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2289
2290        lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2291#endif
2292}
2293
2294static struct lpfc_nvmet_ctxbuf *
2295lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2296                             struct lpfc_nvmet_ctx_info *current_infop)
2297{
2298#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2299        struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2300        struct lpfc_nvmet_ctx_info *get_infop;
2301        int i;
2302
2303        /*
2304         * The current_infop for the MRQ an NVME command IU was received
2305         * on is empty. Our goal is to replenish this MRQ's context
2306         * list from another CPU's list.
2307         *
2308         * First we need to pick a context list to start looking on.
2309         * nvmet_ctx_start_cpu had available contexts the last time
2310         * we needed to replenish this CPU, whereas nvmet_ctx_next_cpu
2311         * is just the next sequential CPU for this MRQ.
2312         */
2313        if (current_infop->nvmet_ctx_start_cpu)
2314                get_infop = current_infop->nvmet_ctx_start_cpu;
2315        else
2316                get_infop = current_infop->nvmet_ctx_next_cpu;
2317
2318        for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2319                if (get_infop == current_infop) {
2320                        get_infop = get_infop->nvmet_ctx_next_cpu;
2321                        continue;
2322                }
2323                spin_lock(&get_infop->nvmet_ctx_list_lock);
2324
2325                /* Just take the entire context list, if there are any */
2326                if (get_infop->nvmet_ctx_list_cnt) {
2327                        list_splice_init(&get_infop->nvmet_ctx_list,
2328                                    &current_infop->nvmet_ctx_list);
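                        /* One less than the donor's count because the
                         * context removed and returned just below is not
                         * left on current_infop's list.
                         */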
2329                        current_infop->nvmet_ctx_list_cnt =
2330                                get_infop->nvmet_ctx_list_cnt - 1;
2331                        get_infop->nvmet_ctx_list_cnt = 0;
2332                        spin_unlock(&get_infop->nvmet_ctx_list_lock);
2333
2334                        current_infop->nvmet_ctx_start_cpu = get_infop;
2335                        list_remove_head(&current_infop->nvmet_ctx_list,
2336                                         ctx_buf, struct lpfc_nvmet_ctxbuf,
2337                                         list);
2338                        return ctx_buf;
2339                }
2340
2341                /* Otherwise, move on to the next CPU for this MRQ */
2342                spin_unlock(&get_infop->nvmet_ctx_list_lock);
2343                get_infop = get_infop->nvmet_ctx_next_cpu;
2344        }
2345
2346#endif
2347        /* Nothing found, all contexts for the MRQ are in-flight */
2348        return NULL;
2349}
2350
2351/**
2352 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2353 * @phba: pointer to lpfc hba data structure.
2354 * @idx: relative index of MRQ vector
2355 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2356 * @isr_timestamp: in jiffies.
2357 * @cqflag: cq processing information regarding workload.
2358 *
2359 * This routine processes an unsolicited NVME FCP command frame received
2360 * on an NVMET MRQ. It obtains a free receive context (ctx_buf) for the
2361 * exchange, replenishing this MRQ's context list from another CPU when
2362 * necessary, initializes the async exchange context, and delivers the
2363 * command to the nvmet transport, either inline or via deferred work
2364 * depending on CQ load. With no free context, the IO is queued to wait.
2365 **/
2366static void
2367lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2368                            uint32_t idx,
2369                            struct rqb_dmabuf *nvmebuf,
2370                            uint64_t isr_timestamp,
2371                            uint8_t cqflag)
2372{
2373        struct lpfc_async_xchg_ctx *ctxp;
2374        struct lpfc_nvmet_tgtport *tgtp;
2375        struct fc_frame_header *fc_hdr;
2376        struct lpfc_nvmet_ctxbuf *ctx_buf;
2377        struct lpfc_nvmet_ctx_info *current_infop;
2378        uint32_t size, oxid, sid, qno;
2379        unsigned long iflag;
2380        int current_cpu;
2381
2382        if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2383                return;
2384
2385        ctx_buf = NULL;
2386        if (!nvmebuf || !phba->targetport) {
2387                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2388                                "6157 NVMET FCP Drop IO\n");
2389                if (nvmebuf)
2390                        lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2391                return;
2392        }
2393
2394        /*
2395         * Get a pointer to the context list for this MRQ based on
2396         * the CPU this MRQ IRQ is associated with. If the CPU association
2397         * changes from our initial assumption, the context list could
2398         * be empty, thus it would need to be replenished with the
2399         * context list from another CPU for this MRQ.
2400         */
2401        current_cpu = raw_smp_processor_id();
2402        current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2403        spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2404        if (current_infop->nvmet_ctx_list_cnt) {
2405                list_remove_head(&current_infop->nvmet_ctx_list,
2406                                 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2407                current_infop->nvmet_ctx_list_cnt--;
2408        } else {
2409                ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2410        }
2411        spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2412
2413        fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2414        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2415        size = nvmebuf->bytes_recv;
2416
2417#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2418        if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
2419                this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
2420                if (idx != current_cpu)
2421                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2422                                        "6703 CPU Check rcv: "
2423                                        "cpu %d expect %d\n",
2424                                        current_cpu, idx);
2425        }
2426#endif
2427
2428        lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
2429                         oxid, size, raw_smp_processor_id());
2430
2431        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2432
2433        if (!ctx_buf) {
2434                /* Queue this NVME IO to process later */
2435                spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2436                list_add_tail(&nvmebuf->hbuf.list,
2437                              &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2438                phba->sli4_hba.nvmet_io_wait_cnt++;
2439                phba->sli4_hba.nvmet_io_wait_total++;
2440                spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2441                                       iflag);
2442
2443                /* Post a brand new DMA buffer to RQ */
2444                qno = nvmebuf->idx;
2445                lpfc_post_rq_buffer(
2446                        phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2447                        phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2448
2449                atomic_inc(&tgtp->defer_ctx);
2450                return;
2451        }
2452
2453        sid = sli4_sid_from_fc_hdr(fc_hdr);
2454
2455        ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
2456        spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2457        list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2458        spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2459        if (ctxp->state != LPFC_NVME_STE_FREE) {
2460                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2461                                "6414 NVMET Context corrupt %d %d oxid x%x\n",
2462                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2463        }
2464        ctxp->wqeq = NULL;
2465        ctxp->offset = 0;
2466        ctxp->phba = phba;
2467        ctxp->size = size;
2468        ctxp->oxid = oxid;
2469        ctxp->sid = sid;
2470        ctxp->idx = idx;
2471        ctxp->state = LPFC_NVME_STE_RCV;
2472        ctxp->entry_cnt = 1;
2473        ctxp->flag = 0;
2474        ctxp->ctxbuf = ctx_buf;
2475        ctxp->rqb_buffer = (void *)nvmebuf;
2476        ctxp->hdwq = NULL;
2477        spin_lock_init(&ctxp->ctxlock);
2478
2479#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2480        if (isr_timestamp)
2481                ctxp->ts_isr_cmd = isr_timestamp;
2482        ctxp->ts_cmd_nvme = 0;
2483        ctxp->ts_nvme_data = 0;
2484        ctxp->ts_data_wqput = 0;
2485        ctxp->ts_isr_data = 0;
2486        ctxp->ts_data_nvme = 0;
2487        ctxp->ts_nvme_status = 0;
2488        ctxp->ts_status_wqput = 0;
2489        ctxp->ts_isr_status = 0;
2490        ctxp->ts_status_nvme = 0;
2491#endif
2492
2493        atomic_inc(&tgtp->rcv_fcp_cmd_in);
2494        /* check for cq processing load */
2495        if (!cqflag) {
2496                lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2497                return;
2498        }
2499
2500        if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2501                atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2502                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2503                                "6325 Unable to queue work for oxid x%x. "
2504                                "FCP Drop IO [x%x x%x x%x]\n",
2505                                ctxp->oxid,
2506                                atomic_read(&tgtp->rcv_fcp_cmd_in),
2507                                atomic_read(&tgtp->rcv_fcp_cmd_out),
2508                                atomic_read(&tgtp->xmt_fcp_release));
2509
2510                spin_lock_irqsave(&ctxp->ctxlock, iflag);
2511                lpfc_nvmet_defer_release(phba, ctxp);
2512                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2513                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2514        }
2515}
2516
2517/**
2518 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2519 * @phba: pointer to lpfc hba data structure.
2520 * @idx: relative index of MRQ vector
2521 * @nvmebuf: pointer to received nvme data structure.
2522 * @isr_timestamp: in jiffies.
2523 * @cqflag: cq processing information regarding workload.
2524 *
2525 * This routine is used to process an unsolicited event received from an SLI
2526 * (Service Level Interface) ring. The actual processing of the data buffer
2527 * associated with the unsolicited event is done by invoking the routine
2528 * lpfc_nvmet_unsol_fcp_buffer() after validating the buffer received from
2529 * the SLI RQ on which the unsolicited event was received.
2530 **/
2531void
2532lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2533                           uint32_t idx,
2534                           struct rqb_dmabuf *nvmebuf,
2535                           uint64_t isr_timestamp,
2536                           uint8_t cqflag)
2537{
2538        if (!nvmebuf) {
2539                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2540                                "3167 NVMET FCP Drop IO\n");
2541                return;
2542        }
2543        if (phba->nvmet_support == 0) {
2544                lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2545                return;
2546        }
2547        lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2548}
2549
2550/**
2551 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2552 * @phba: pointer to a host N_Port data structure.
2553 * @ctxp: Context info for NVME LS Request
2554 * @rspbuf: DMA address of the NVME LS response payload.
2555 * @rspsize: size of the NVME LS response.
2556 *
2557 * This routine allocates a lpfc-WQE data structure from the driver
2558 * lpfc-WQE free-list and prepares it as an XMIT_SEQUENCE64 WQE that
2559 * transmits the NVME LS response described by rspbuf and rspsize back
2560 * to the initiator identified by ctxp->sid on the exchange identified
2561 * by ctxp->oxid. It fills in the Buffer Descriptor Entry (BDE) for the
2562 * response payload and the WQE words required to send the sequence;
2563 * the LS response contents themselves are expected to already be in
2564 * the DMA buffer supplied by the caller. The routine verifies the link
2565 * is up and that an ndlp exists for the remote port before preparing
2566 * the WQE. The reference count on the
2567 * ndlp is incremented by 1 and the reference to the ndlp is put into
2568 * context1 of the WQE data structure for this WQE to hold the ndlp
2569 * reference for the command's callback function to access later.
2570 *
2571 * Return code
2572 *   Pointer to the newly allocated/prepared nvme wqe data structure
2573 *   NULL - when nvme wqe data structure allocation/preparation failed
2574 **/
2575static struct lpfc_iocbq *
2576lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2577                       struct lpfc_async_xchg_ctx *ctxp,
2578                       dma_addr_t rspbuf, uint16_t rspsize)
2579{
2580        struct lpfc_nodelist *ndlp;
2581        struct lpfc_iocbq *nvmewqe;
2582        union lpfc_wqe128 *wqe;
2583
2584        if (!lpfc_is_link_up(phba)) {
2585                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2586                                "6104 NVMET prep LS wqe: link err: "
2587                                "NPORT x%x oxid:x%x ste %d\n",
2588                                ctxp->sid, ctxp->oxid, ctxp->state);
2589                return NULL;
2590        }
2591
2592        /* Allocate buffer for  command wqe */
2593        nvmewqe = lpfc_sli_get_iocbq(phba);
2594        if (nvmewqe == NULL) {
2595                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2596                                "6105 NVMET prep LS wqe: No WQE: "
2597                                "NPORT x%x oxid x%x ste %d\n",
2598                                ctxp->sid, ctxp->oxid, ctxp->state);
2599                return NULL;
2600        }
2601
2602        ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2603        if (!ndlp ||
2604            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2605            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2606                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2607                                "6106 NVMET prep LS wqe: No ndlp: "
2608                                "NPORT x%x oxid x%x ste %d\n",
2609                                ctxp->sid, ctxp->oxid, ctxp->state);
2610                goto nvme_wqe_free_wqeq_exit;
2611        }
2612        ctxp->wqeq = nvmewqe;
2613
2614        /* prevent preparing wqe with NULL ndlp reference */
2615        nvmewqe->context1 = lpfc_nlp_get(ndlp);
2616        if (nvmewqe->context1 == NULL)
2617                goto nvme_wqe_free_wqeq_exit;
2618        nvmewqe->context2 = ctxp;
2619
2620        wqe = &nvmewqe->wqe;
2621        memset(wqe, 0, sizeof(union lpfc_wqe));
2622
2623        /* Words 0 - 2 */
2624        wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2625        wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2626        wqe->xmit_sequence.bde.addrLow = cpu_to_le32(putPaddrLow(rspbuf));
2627        wqe->xmit_sequence.bde.addrHigh = cpu_to_le32(putPaddrHigh(rspbuf));
2628
2629        /* Word 3 */
2630
2631        /* Word 4 */
2632
2633        /* Word 5 */
2634        bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2635        bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2636        bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2637        bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2638        bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2639
2640        /* Word 6 */
2641        bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2642               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2643        bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2644
2645        /* Word 7 */
2646        bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2647               CMD_XMIT_SEQUENCE64_WQE);
2648        bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2649        bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2650        bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2651
2652        /* Word 8 */
2653        wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2654
2655        /* Word 9 */
2656        bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2657        /* Needs to be set by caller */
2658        bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2659
2660        /* Word 10 */
2661        bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2662        bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2663        bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2664               LPFC_WQE_LENLOC_WORD12);
2665        bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2666
2667        /* Word 11 */
2668        bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2669               LPFC_WQE_CQ_ID_DEFAULT);
2670        bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2671               OTHER_COMMAND);
2672
2673        /* Word 12 */
2674        wqe->xmit_sequence.xmit_len = rspsize;
2675
2676        nvmewqe->retry = 1;
2677        nvmewqe->vport = phba->pport;
2678        nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2679        nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2680
2681        /* Xmit NVMET response to remote NPORT <did> */
2682        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2683                        "6039 Xmit NVMET LS response to remote "
2684                        "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2685                        ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2686                        rspsize);
2687        return nvmewqe;
2688
2689nvme_wqe_free_wqeq_exit:
2690        nvmewqe->context2 = NULL;
2691        nvmewqe->context3 = NULL;
2692        lpfc_sli_release_iocbq(phba, nvmewqe);
2693        return NULL;
2694}
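
/*
 * Note: the WQE words above are populated with the driver's bf_set()
 * bitfield helpers.  A rough, self-contained sketch of that idiom
 * follows; the demo_* names are illustrative only and are not the lpfc
 * definitions.  Assuming a field that occupies bits 15:8 of word7:
 *
 *	#define demo_field_SHIFT	8
 *	#define demo_field_MASK		0x000000ff
 *	#define demo_field_WORD		word7
 *
 *	struct demo_wqe_com {
 *		uint32_t word7;
 *	};
 *
 *	#define demo_bf_set(name, ptr, value)				  \
 *		((ptr)->name##_WORD =					  \
 *		 (((value) & name##_MASK) << name##_SHIFT) |		  \
 *		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
 *
 *	struct demo_wqe_com com = { 0 };
 *
 *	demo_bf_set(demo_field, &com, 0x3);	// com.word7 is now 0x00000300
 */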
2695
2696
2697static struct lpfc_iocbq *
2698lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2699                        struct lpfc_async_xchg_ctx *ctxp)
2700{
2701        struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
2702        struct lpfc_nvmet_tgtport *tgtp;
2703        struct sli4_sge *sgl;
2704        struct lpfc_nodelist *ndlp;
2705        struct lpfc_iocbq *nvmewqe;
2706        struct scatterlist *sgel;
2707        union lpfc_wqe128 *wqe;
2708        struct ulp_bde64 *bde;
2709        dma_addr_t physaddr;
2710        int i, cnt, nsegs;
2711        int do_pbde;
2712        int xc = 1;
2713
2714        if (!lpfc_is_link_up(phba)) {
2715                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2716                                "6107 NVMET prep FCP wqe: link err: "
2717                                "NPORT x%x oxid x%x ste %d\n",
2718                                ctxp->sid, ctxp->oxid, ctxp->state);
2719                return NULL;
2720        }
2721
2722        ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2723        if (!ndlp ||
2724            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2725             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2726                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2727                                "6108 NVMET prep FCP wqe: no ndlp: "
2728                                "NPORT x%x oxid x%x ste %d\n",
2729                                ctxp->sid, ctxp->oxid, ctxp->state);
2730                return NULL;
2731        }
2732
2733        if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2734                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2735                                "6109 NVMET prep FCP wqe: seg cnt err: "
2736                                "NPORT x%x oxid x%x ste %d cnt %d\n",
2737                                ctxp->sid, ctxp->oxid, ctxp->state,
2738                                phba->cfg_nvme_seg_cnt);
2739                return NULL;
2740        }
2741        nsegs = rsp->sg_cnt;
2742
2743        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2744        nvmewqe = ctxp->wqeq;
2745        if (nvmewqe == NULL) {
2746                /* Allocate buffer for  command wqe */
2747                nvmewqe = ctxp->ctxbuf->iocbq;
2748                if (nvmewqe == NULL) {
2749                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2750                                        "6110 NVMET prep FCP wqe: No "
2751                                        "WQE: NPORT x%x oxid x%x ste %d\n",
2752                                        ctxp->sid, ctxp->oxid, ctxp->state);
2753                        return NULL;
2754                }
2755                ctxp->wqeq = nvmewqe;
2756                xc = 0; /* create new XRI */
2757                nvmewqe->sli4_lxritag = NO_XRI;
2758                nvmewqe->sli4_xritag = NO_XRI;
2759        }
2760
2761        /* Sanity check */
2762        if (((ctxp->state == LPFC_NVME_STE_RCV) &&
2763            (ctxp->entry_cnt == 1)) ||
2764            (ctxp->state == LPFC_NVME_STE_DATA)) {
2765                wqe = &nvmewqe->wqe;
2766        } else {
2767                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2768                                "6111 Wrong state NVMET FCP: %d  cnt %d\n",
2769                                ctxp->state, ctxp->entry_cnt);
2770                return NULL;
2771        }
2772
2773        sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2774        switch (rsp->op) {
2775        case NVMET_FCOP_READDATA:
2776        case NVMET_FCOP_READDATA_RSP:
2777                /* From the tsend template, initialize words 7 - 11 */
2778                memcpy(&wqe->words[7],
2779                       &lpfc_tsend_cmd_template.words[7],
2780                       sizeof(uint32_t) * 5);
2781
2782                /* Words 0 - 2 : The first sg segment */
2783                sgel = &rsp->sg[0];
2784                physaddr = sg_dma_address(sgel);
2785                wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2786                wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2787                wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2788                wqe->fcp_tsend.bde.addrHigh =
2789                        cpu_to_le32(putPaddrHigh(physaddr));
2790
2791                /* Word 3 */
2792                wqe->fcp_tsend.payload_offset_len = 0;
2793
2794                /* Word 4 */
2795                wqe->fcp_tsend.relative_offset = ctxp->offset;
2796
2797                /* Word 5 */
2798                wqe->fcp_tsend.reserved = 0;
2799
2800                /* Word 6 */
2801                bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2802                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2803                bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2804                       nvmewqe->sli4_xritag);
2805
2806                /* Word 7 - set ar later */
2807
2808                /* Word 8 */
2809                wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2810
2811                /* Word 9 */
2812                bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2813                bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2814
2815                /* Word 10 - set wqes later, in template xc=1 */
2816                if (!xc)
2817                        bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2818
2819                /* Word 11 - set sup, irsp, irsplen later */
2820                do_pbde = 0;
2821
2822                /* Word 12 */
2823                wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2824
2825                /* Setup 2 SKIP SGEs */
2826                sgl->addr_hi = 0;
2827                sgl->addr_lo = 0;
2828                sgl->word2 = 0;
2829                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2830                sgl->word2 = cpu_to_le32(sgl->word2);
2831                sgl->sge_len = 0;
2832                sgl++;
2833                sgl->addr_hi = 0;
2834                sgl->addr_lo = 0;
2835                sgl->word2 = 0;
2836                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2837                sgl->word2 = cpu_to_le32(sgl->word2);
2838                sgl->sge_len = 0;
2839                sgl++;
2840                if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2841                        atomic_inc(&tgtp->xmt_fcp_read_rsp);
2842
2843                        /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2844
2845                        if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2846                                if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2847                                        bf_set(wqe_sup,
2848                                               &wqe->fcp_tsend.wqe_com, 1);
2849                        } else {
2850                                bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2851                                bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2852                                bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2853                                       ((rsp->rsplen >> 2) - 1));
2854                                memcpy(&wqe->words[16], rsp->rspaddr,
2855                                       rsp->rsplen);
2856                        }
2857                } else {
2858                        atomic_inc(&tgtp->xmt_fcp_read);
2859
2860                        /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2861                        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2862                }
2863                break;
2864
2865        case NVMET_FCOP_WRITEDATA:
2866                /* From the treceive template, initialize words 3 - 11 */
2867                memcpy(&wqe->words[3],
2868                       &lpfc_treceive_cmd_template.words[3],
2869                       sizeof(uint32_t) * 9);
2870
2871                /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
2872                wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
2873                wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
2874                wqe->fcp_treceive.bde.addrLow = 0;
2875                wqe->fcp_treceive.bde.addrHigh = 0;
2876
2877                /* Word 4 */
2878                wqe->fcp_treceive.relative_offset = ctxp->offset;
2879
2880                /* Word 6 */
2881                bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2882                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2883                bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2884                       nvmewqe->sli4_xritag);
2885
2886                /* Word 7 */
2887
2888                /* Word 8 */
2889                wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2890
2891                /* Word 9 */
2892                bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2893                bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2894
2895                /* Word 10 - in template xc=1 */
2896                if (!xc)
2897                        bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2898
2899                /* Word 11 - set pbde later */
2900                if (phba->cfg_enable_pbde) {
2901                        do_pbde = 1;
2902                } else {
2903                        bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2904                        do_pbde = 0;
2905                }
2906
2907                /* Word 12 */
2908                wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2909
2910                /* Setup 2 SKIP SGEs */
2911                sgl->addr_hi = 0;
2912                sgl->addr_lo = 0;
2913                sgl->word2 = 0;
2914                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2915                sgl->word2 = cpu_to_le32(sgl->word2);
2916                sgl->sge_len = 0;
2917                sgl++;
2918                sgl->addr_hi = 0;
2919                sgl->addr_lo = 0;
2920                sgl->word2 = 0;
2921                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2922                sgl->word2 = cpu_to_le32(sgl->word2);
2923                sgl->sge_len = 0;
2924                sgl++;
2925                atomic_inc(&tgtp->xmt_fcp_write);
2926                break;
2927
2928        case NVMET_FCOP_RSP:
2929                /* From the trsp template, initialize words 4 - 11 */
2930                memcpy(&wqe->words[4],
2931                       &lpfc_trsp_cmd_template.words[4],
2932                       sizeof(uint32_t) * 8);
2933
2934                /* Words 0 - 2 */
2935                physaddr = rsp->rspdma;
2936                wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2937                wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2938                wqe->fcp_trsp.bde.addrLow =
2939                        cpu_to_le32(putPaddrLow(physaddr));
2940                wqe->fcp_trsp.bde.addrHigh =
2941                        cpu_to_le32(putPaddrHigh(physaddr));
2942
2943                /* Word 3 */
2944                wqe->fcp_trsp.response_len = rsp->rsplen;
2945
2946                /* Word 6 */
2947                bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2948                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2949                bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2950                       nvmewqe->sli4_xritag);
2951
2952                /* Word 7 */
2953
2954                /* Word 8 */
2955                wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2956
2957                /* Word 9 */
2958                bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2959                bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2960
2961                /* Word 10 */
2962                if (xc)
2963                        bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2964
2965                /* Word 11 */
2966                /* In template wqes=0 irsp=0 irsplen=0 - good response */
2967                if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2968                        /* Bad response - embed it */
2969                        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2970                        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2971                        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2972                               ((rsp->rsplen >> 2) - 1));
2973                        memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2974                }
2975                do_pbde = 0;
2976
2977                /* Word 12 */
2978                wqe->fcp_trsp.rsvd_12_15[0] = 0;
2979
2980                /* Use rspbuf, NOT sg list */
2981                nsegs = 0;
2982                sgl->word2 = 0;
2983                atomic_inc(&tgtp->xmt_fcp_rsp);
2984                break;
2985
2986        default:
2987                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2988                                "6064 Unknown Rsp Op %d\n",
2989                                rsp->op);
2990                return NULL;
2991        }
2992
2993        nvmewqe->retry = 1;
2994        nvmewqe->vport = phba->pport;
2995        nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2996        nvmewqe->context1 = ndlp;
2997
2998        for_each_sg(rsp->sg, sgel, nsegs, i) {
2999                physaddr = sg_dma_address(sgel);
3000                cnt = sg_dma_len(sgel);
3001                sgl->addr_hi = putPaddrHigh(physaddr);
3002                sgl->addr_lo = putPaddrLow(physaddr);
3003                sgl->word2 = 0;
3004                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3005                bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
3006                if ((i+1) == rsp->sg_cnt)
3007                        bf_set(lpfc_sli4_sge_last, sgl, 1);
3008                sgl->word2 = cpu_to_le32(sgl->word2);
3009                sgl->sge_len = cpu_to_le32(cnt);
3010                if (i == 0) {
3011                        bde = (struct ulp_bde64 *)&wqe->words[13];
3012                        if (do_pbde) {
3013                                /* Words 13-15  (PBDE) */
3014                                bde->addrLow = sgl->addr_lo;
3015                                bde->addrHigh = sgl->addr_hi;
3016                                bde->tus.f.bdeSize =
3017                                        le32_to_cpu(sgl->sge_len);
3018                                bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3019                                bde->tus.w = cpu_to_le32(bde->tus.w);
3020                        } else {
3021                                memset(bde, 0, sizeof(struct ulp_bde64));
3022                        }
3023                }
3024                sgl++;
3025                ctxp->offset += cnt;
3026        }
3027        ctxp->state = LPFC_NVME_STE_DATA;
3028        ctxp->entry_cnt++;
3029        return nvmewqe;
3030}
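
/*
 * Summary of the SGL this routine builds (simplified; see the code above
 * for the authoritative details).  For NVMET_FCOP_READDATA,
 * NVMET_FCOP_READDATA_RSP and NVMET_FCOP_WRITEDATA, two SKIP entries are
 * reserved up front and then one DATA SGE is added per scatterlist
 * segment, each carrying the running relative offset, with the 'last'
 * bit set on the final entry:
 *
 *	SGE[0]    SKIP
 *	SGE[1]    SKIP
 *	SGE[2]    DATA  addr = sg[0],   len = sg_dma_len(sg[0])
 *	 ...
 *	SGE[n+1]  DATA  addr = sg[n-1], len = sg_dma_len(sg[n-1]), last = 1
 *
 * For NVMET_FCOP_RSP no DATA SGEs are added (nsegs is forced to 0); the
 * response is described by the WQE's BDE and, for a non-good response,
 * embedded directly in WQE words 16 and up.
 */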
3031
3032/**
3033 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
3034 * @phba: Pointer to HBA context object.
3035 * @cmdwqe: Pointer to driver command WQE object.
3036 * @wcqe: Pointer to driver response CQE object.
3037 *
3038 * The function is called from the SLI ring event handler with no
3039 * lock held. It is the completion handler for an NVME ABTS on an FCP
3040 * command and frees the memory resources used for the NVME command.
3041 **/
3042static void
3043lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3044                             struct lpfc_wcqe_complete *wcqe)
3045{
3046        struct lpfc_async_xchg_ctx *ctxp;
3047        struct lpfc_nvmet_tgtport *tgtp;
3048        uint32_t result;
3049        unsigned long flags;
3050        bool released = false;
3051
3052        ctxp = cmdwqe->context2;
3053        result = wcqe->parameter;
3054
3055        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3056        if (ctxp->flag & LPFC_NVME_ABORT_OP)
3057                atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3058
3059        spin_lock_irqsave(&ctxp->ctxlock, flags);
3060        ctxp->state = LPFC_NVME_STE_DONE;
3061
3062        /* Check if we already received a free context call
3063         * and we have completed processing an abort situation.
3064         */
3065        if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3066            !(ctxp->flag & LPFC_NVME_XBUSY)) {
3067                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3068                list_del_init(&ctxp->list);
3069                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3070                released = true;
3071        }
3072        ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3073        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3074        atomic_inc(&tgtp->xmt_abort_rsp);
3075
3076        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3077                        "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
3078                        "WCQE: %08x %08x %08x %08x\n",
3079                        ctxp->oxid, ctxp->flag, released,
3080                        wcqe->word0, wcqe->total_data_placed,
3081                        result, wcqe->word3);
3082
3083        cmdwqe->context2 = NULL;
3084        cmdwqe->context3 = NULL;
3085        /*
3086         * if transport has released ctx, then can reuse it. Otherwise,
3087         * will be recycled by transport release call.
3088         */
3089        if (released)
3090                lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3091
3092        /* This is the iocbq for the abort, not the command */
3093        lpfc_sli_release_iocbq(phba, cmdwqe);
3094
3095        /* Since iaab/iaar are NOT set, there is no work left.
3096         * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3097         * should have been called already.
3098         */
3099}
3100
3101/**
3102 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
3103 * @phba: Pointer to HBA context object.
3104 * @cmdwqe: Pointer to driver command WQE object.
3105 * @wcqe: Pointer to driver response CQE object.
3106 *
3107 * The function is called from the SLI ring event handler with no
3108 * lock held. It is the completion handler for an NVME ABTS on an FCP
3109 * command and frees the memory resources used for the NVME command.
3110 **/
3111static void
3112lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3113                               struct lpfc_wcqe_complete *wcqe)
3114{
3115        struct lpfc_async_xchg_ctx *ctxp;
3116        struct lpfc_nvmet_tgtport *tgtp;
3117        unsigned long flags;
3118        uint32_t result;
3119        bool released = false;
3120
3121        ctxp = cmdwqe->context2;
3122        result = wcqe->parameter;
3123
3124        if (!ctxp) {
3125                /* if context is clear, the related I/O already completed */
3126                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3127                                "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3128                                wcqe->word0, wcqe->total_data_placed,
3129                                result, wcqe->word3);
3130                return;
3131        }
3132
3133        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3134        spin_lock_irqsave(&ctxp->ctxlock, flags);
3135        if (ctxp->flag & LPFC_NVME_ABORT_OP)
3136                atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3137
3138        /* Sanity check */
3139        if (ctxp->state != LPFC_NVME_STE_ABORT) {
3140                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3141                                "6112 ABTS Wrong state:%d oxid x%x\n",
3142                                ctxp->state, ctxp->oxid);
3143        }
3144
3145        /* Check if we already received a free context call
3146         * and we have completed processing an abort situation.
3147         */
3148        ctxp->state = LPFC_NVME_STE_DONE;
3149        if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3150            !(ctxp->flag & LPFC_NVME_XBUSY)) {
3151                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3152                list_del_init(&ctxp->list);
3153                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3154                released = true;
3155        }
3156        ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3157        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3158        atomic_inc(&tgtp->xmt_abort_rsp);
3159
3160        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3161                        "6316 ABTS cmpl oxid x%x flg x%x (%x) "
3162                        "WCQE: %08x %08x %08x %08x\n",
3163                        ctxp->oxid, ctxp->flag, released,
3164                        wcqe->word0, wcqe->total_data_placed,
3165                        result, wcqe->word3);
3166
3167        cmdwqe->context2 = NULL;
3168        cmdwqe->context3 = NULL;
3169        /*
3170         * if transport has released ctx, then can reuse it. Otherwise,
3171         * will be recycled by transport release call.
3172         */
3173        if (released)
3174                lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3175
3176        /* Since iaab/iaar are NOT set, there is no work left.
3177         * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3178         * should have been called already.
3179         */
3180}
3181
3182/**
3183 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3184 * @phba: Pointer to HBA context object.
3185 * @cmdwqe: Pointer to driver command WQE object.
3186 * @wcqe: Pointer to driver response CQE object.
3187 *
3188 * The function is called from the SLI ring event handler with no
3189 * lock held. It is the completion handler for an NVME ABTS on an LS
3190 * command and frees the memory resources used for the NVME command.
3191 **/
3192static void
3193lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3194                            struct lpfc_wcqe_complete *wcqe)
3195{
3196        struct lpfc_async_xchg_ctx *ctxp;
3197        struct lpfc_nvmet_tgtport *tgtp;
3198        uint32_t result;
3199
3200        ctxp = cmdwqe->context2;
3201        result = wcqe->parameter;
3202
3203        if (phba->nvmet_support) {
3204                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3205                atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3206        }
3207
3208        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3209                        "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3210                        ctxp, wcqe->word0, wcqe->total_data_placed,
3211                        result, wcqe->word3);
3212
3213        if (!ctxp) {
3214                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3215                                "6415 NVMET LS Abort No ctx: WCQE: "
3216                                 "%08x %08x %08x %08x\n",
3217                                wcqe->word0, wcqe->total_data_placed,
3218                                result, wcqe->word3);
3219
3220                lpfc_sli_release_iocbq(phba, cmdwqe);
3221                return;
3222        }
3223
3224        if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
3225                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3226                                "6416 NVMET LS abort cmpl state mismatch: "
3227                                "oxid x%x: %d %d\n",
3228                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3229        }
3230
3231        cmdwqe->context2 = NULL;
3232        cmdwqe->context3 = NULL;
3233        lpfc_sli_release_iocbq(phba, cmdwqe);
3234        kfree(ctxp);
3235}
3236
3237static int
3238lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3239                             struct lpfc_async_xchg_ctx *ctxp,
3240                             uint32_t sid, uint16_t xri)
3241{
3242        struct lpfc_nvmet_tgtport *tgtp = NULL;
3243        struct lpfc_iocbq *abts_wqeq;
3244        union lpfc_wqe128 *wqe_abts;
3245        struct lpfc_nodelist *ndlp;
3246
3247        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3248                        "6067 ABTS: sid %x xri x%x/x%x\n",
3249                        sid, xri, ctxp->wqeq->sli4_xritag);
3250
3251        if (phba->nvmet_support && phba->targetport)
3252                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3253
3254        ndlp = lpfc_findnode_did(phba->pport, sid);
3255        if (!ndlp ||
3256            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3257            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3258                if (tgtp)
3259                        atomic_inc(&tgtp->xmt_abort_rsp_error);
3260                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3261                                "6134 Drop ABTS - wrong NDLP state x%x.\n",
3262                                (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3263
3264                /* No failure to an ABTS request. */
3265                return 0;
3266        }
3267
3268        abts_wqeq = ctxp->wqeq;
3269        wqe_abts = &abts_wqeq->wqe;
3270
3271        /*
3272         * Since we zero the whole WQE, we need to ensure we set the WQE fields
3273         * that were initialized in lpfc_sli4_nvmet_alloc.
3274         */
3275        memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3276
3277        /* Word 5 */
3278        bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3279        bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3280        bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3281        bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3282        bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3283
3284        /* Word 6 */
3285        bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3286               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3287        bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3288               abts_wqeq->sli4_xritag);
3289
3290        /* Word 7 */
3291        bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3292               CMD_XMIT_SEQUENCE64_WQE);
3293        bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3294        bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3295        bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3296
3297        /* Word 8 */
3298        wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3299
3300        /* Word 9 */
3301        bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3302        /* Needs to be set by caller */
3303        bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3304
3305        /* Word 10 */
3306        bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3307        bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3308               LPFC_WQE_LENLOC_WORD12);
3309        bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3310        bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3311
3312        /* Word 11 */
3313        bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3314               LPFC_WQE_CQ_ID_DEFAULT);
3315        bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3316               OTHER_COMMAND);
3317
3318        abts_wqeq->vport = phba->pport;
3319        abts_wqeq->context1 = ndlp;
3320        abts_wqeq->context2 = ctxp;
3321        abts_wqeq->context3 = NULL;
3322        abts_wqeq->rsvd2 = 0;
3323        /* hba_wqidx should already be set up from the command we are aborting */
3324        abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3325        abts_wqeq->iocb.ulpLe = 1;
3326
3327        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3328                        "6069 Issue ABTS to xri x%x reqtag x%x\n",
3329                        xri, abts_wqeq->iotag);
3330        return 1;
3331}
3332
3333/**
3334 * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
3335 * @pwqeq: Pointer to command iocb.
3336 * @xritag: Tag that uniquely identifies the local exchange resource.
3337 * @opt: Option bits -
3338 *              bit 0 = inhibit sending abts on the link
3339 *
3340 * This function is called with hbalock held.
3341 **/
3342static void
3343lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
3344{
3345        union lpfc_wqe128 *wqe = &pwqeq->wqe;
3346
3347        /* WQEs are reused.  Clear stale data and set key fields to
3348         * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3349         */
3350        memset(wqe, 0, sizeof(*wqe));
3351
3352        if (opt & INHIBIT_ABORT)
3353                bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
3354        /* Abort specified xri tag, with the mask deliberately zeroed */
3355        bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
3356
3357        bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3358
3359        /* Abort the I/O associated with this outstanding exchange ID. */
3360        wqe->abort_cmd.wqe_com.abort_tag = xritag;
3361
3362        /* iotag for the wqe completion. */
3363        bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
3364
3365        bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
3366        bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3367
3368        bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3369        bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
3370        bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3371}
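
/*
 * Typical usage, condensed from the solicited FCP abort path below
 * (state checks, locking and error handling omitted):
 *
 *	abts_wqeq = ctxp->abort_wqeq;
 *	lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
 *
 *	// ABTS WQE must go to the same WQ as the WQE being aborted
 *	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
 *	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
 *	abts_wqeq->iocb_cmpl = NULL;
 *	abts_wqeq->context2 = ctxp;
 *	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
 */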
3372
3373static int
3374lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3375                               struct lpfc_async_xchg_ctx *ctxp,
3376                               uint32_t sid, uint16_t xri)
3377{
3378        struct lpfc_nvmet_tgtport *tgtp;
3379        struct lpfc_iocbq *abts_wqeq;
3380        struct lpfc_nodelist *ndlp;
3381        unsigned long flags;
3382        u8 opt;
3383        int rc;
3384
3385        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3386        if (!ctxp->wqeq) {
3387                ctxp->wqeq = ctxp->ctxbuf->iocbq;
3388                ctxp->wqeq->hba_wqidx = 0;
3389        }
3390
3391        ndlp = lpfc_findnode_did(phba->pport, sid);
3392        if (!ndlp ||
3393            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3394            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3395                atomic_inc(&tgtp->xmt_abort_rsp_error);
3396                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3397                                "6160 Drop ABORT - wrong NDLP state x%x.\n",
3398                                (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3399
3400                /* No failure to an ABTS request. */
3401                spin_lock_irqsave(&ctxp->ctxlock, flags);
3402                ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3403                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3404                return 0;
3405        }
3406
3407        /* Issue ABTS for this WQE based on iotag */
3408        ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3409        spin_lock_irqsave(&ctxp->ctxlock, flags);
3410        if (!ctxp->abort_wqeq) {
3411                atomic_inc(&tgtp->xmt_abort_rsp_error);
3412                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3413                                "6161 ABORT failed: No wqeqs: "
3414                                "xri: x%x\n", ctxp->oxid);
3415                /* No failure to an ABTS request. */
3416                ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3417                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3418                return 0;
3419        }
3420        abts_wqeq = ctxp->abort_wqeq;
3421        ctxp->state = LPFC_NVME_STE_ABORT;
3422        opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
3423        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3424
3425        /* Log the abort request before submitting the new WQE. */
3426        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3427                        "6162 ABORT Request to rport DID x%06x "
3428                        "for xri x%x x%x\n",
3429                        ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3430
3431        /* If the hba is getting reset, this flag is set.  It is
3432         * cleared when the reset is complete and rings reestablished.
3433         */
3434        spin_lock_irqsave(&phba->hbalock, flags);
3435        /* driver queued commands are in process of being flushed */
3436        if (phba->hba_flag & HBA_IOQ_FLUSH) {
3437                spin_unlock_irqrestore(&phba->hbalock, flags);
3438                atomic_inc(&tgtp->xmt_abort_rsp_error);
3439                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3440                                "6163 Driver in reset cleanup - flushing "
3441                                "NVME Req now. hba_flag x%x oxid x%x\n",
3442                                phba->hba_flag, ctxp->oxid);
3443                lpfc_sli_release_iocbq(phba, abts_wqeq);
3444                spin_lock_irqsave(&ctxp->ctxlock, flags);
3445                ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3446                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3447                return 0;
3448        }
3449
3450        /* Outstanding abort is in progress */
3451        if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3452                spin_unlock_irqrestore(&phba->hbalock, flags);
3453                atomic_inc(&tgtp->xmt_abort_rsp_error);
3454                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3455                                "6164 Outstanding NVME I/O Abort Request "
3456                                "still pending on oxid x%x\n",
3457                                ctxp->oxid);
3458                lpfc_sli_release_iocbq(phba, abts_wqeq);
3459                spin_lock_irqsave(&ctxp->ctxlock, flags);
3460                ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3461                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3462                return 0;
3463        }
3464
3465        /* Ready - mark outstanding as aborted by driver. */
3466        abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3467
3468        lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
3469
3470        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3471        abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3472        abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3473        abts_wqeq->iocb_cmpl = NULL;
3474        abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3475        abts_wqeq->context2 = ctxp;
3476        abts_wqeq->vport = phba->pport;
3477        if (!ctxp->hdwq)
3478                ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3479
3480        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3481        spin_unlock_irqrestore(&phba->hbalock, flags);
3482        if (rc == WQE_SUCCESS) {
3483                atomic_inc(&tgtp->xmt_abort_sol);
3484                return 0;
3485        }
3486
3487        atomic_inc(&tgtp->xmt_abort_rsp_error);
3488        spin_lock_irqsave(&ctxp->ctxlock, flags);
3489        ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3490        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3491        lpfc_sli_release_iocbq(phba, abts_wqeq);
3492        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3493                        "6166 Failed ABORT issue_wqe with status x%x "
3494                        "for oxid x%x.\n",
3495                        rc, ctxp->oxid);
3496        return 1;
3497}
3498
3499static int
3500lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3501                                 struct lpfc_async_xchg_ctx *ctxp,
3502                                 uint32_t sid, uint16_t xri)
3503{
3504        struct lpfc_nvmet_tgtport *tgtp;
3505        struct lpfc_iocbq *abts_wqeq;
3506        unsigned long flags;
3507        bool released = false;
3508        int rc;
3509
3510        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3511        if (!ctxp->wqeq) {
3512                ctxp->wqeq = ctxp->ctxbuf->iocbq;
3513                ctxp->wqeq->hba_wqidx = 0;
3514        }
3515
3516        if (ctxp->state == LPFC_NVME_STE_FREE) {
3517                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3518                                "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3519                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3520                rc = WQE_BUSY;
3521                goto aerr;
3522        }
3523        ctxp->state = LPFC_NVME_STE_ABORT;
3524        ctxp->entry_cnt++;
3525        rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3526        if (rc == 0)
3527                goto aerr;
3528
3529        spin_lock_irqsave(&phba->hbalock, flags);
3530        abts_wqeq = ctxp->wqeq;
3531        abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3532        abts_wqeq->iocb_cmpl = NULL;
3533        abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3534        if (!ctxp->hdwq)
3535                ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3536
3537        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3538        spin_unlock_irqrestore(&phba->hbalock, flags);
3539        if (rc == WQE_SUCCESS) {
3540                return 0;
3541        }
3542
3543aerr:
3544        spin_lock_irqsave(&ctxp->ctxlock, flags);
3545        if (ctxp->flag & LPFC_NVME_CTX_RLS) {
3546                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3547                list_del_init(&ctxp->list);
3548                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3549                released = true;
3550        }
3551        ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
3552        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3553
3554        atomic_inc(&tgtp->xmt_abort_rsp_error);
3555        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3556                        "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3557                        "(%x)\n",
3558                        ctxp->oxid, rc, released);
3559        if (released)
3560                lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3561        return 1;
3562}
3563
3564/**
3565 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
3566 *        via async frame receive where the frame is not handled.
3567 * @phba: pointer to adapter structure
3568 * @ctxp: pointer to the asynchronously received sequence
3569 * @sid: address of the remote port to send the ABTS to
3570 * @xri: oxid value for the ABTS (other side's exchange id).
3571 **/
3572int
3573lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
3574                                struct lpfc_async_xchg_ctx *ctxp,
3575                                uint32_t sid, uint16_t xri)
3576{
3577        struct lpfc_nvmet_tgtport *tgtp = NULL;
3578        struct lpfc_iocbq *abts_wqeq;
3579        unsigned long flags;
3580        int rc;
3581
3582        if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3583            (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3584                ctxp->state = LPFC_NVME_STE_LS_ABORT;
3585                ctxp->entry_cnt++;
3586        } else {
3587                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3588                                "6418 NVMET LS abort state mismatch "
3589                                "IO x%x: %d %d\n",
3590                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3591                ctxp->state = LPFC_NVME_STE_LS_ABORT;
3592        }
3593
3594        if (phba->nvmet_support && phba->targetport)
3595                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3596
3597        if (!ctxp->wqeq) {
3598                /* Issue ABTS for this WQE based on iotag */
3599                ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3600                if (!ctxp->wqeq) {
3601                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3602                                        "6068 Abort failed: No wqeqs: "
3603                                        "xri: x%x\n", xri);
3604                        /* No failure to an ABTS request. */
3605                        kfree(ctxp);
3606                        return 0;
3607                }
3608        }
3609        abts_wqeq = ctxp->wqeq;
3610
3611        if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3612                rc = WQE_BUSY;
3613                goto out;
3614        }
3615
3616        spin_lock_irqsave(&phba->hbalock, flags);
3617        abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3618        abts_wqeq->iocb_cmpl = NULL;
3619        abts_wqeq->iocb_flag |=  LPFC_IO_NVME_LS;
3620        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3621        spin_unlock_irqrestore(&phba->hbalock, flags);
3622        if (rc == WQE_SUCCESS) {
3623                if (tgtp)
3624                        atomic_inc(&tgtp->xmt_abort_unsol);
3625                return 0;
3626        }
3627out:
3628        if (tgtp)
3629                atomic_inc(&tgtp->xmt_abort_rsp_error);
3630        abts_wqeq->context2 = NULL;
3631        abts_wqeq->context3 = NULL;
3632        lpfc_sli_release_iocbq(phba, abts_wqeq);
3633        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3634                        "6056 Failed to Issue ABTS. Status x%x\n", rc);
3635        return 1;
3636}
3637
3638/**
3639 * lpfc_nvmet_invalidate_host - invalidate an NVME host connected to this target
3640 *
3641 * @phba: pointer to the driver instance bound to an adapter port.
3642 * @ndlp: pointer to an lpfc_nodelist type
3643 *
3644 * This routine upcalls the nvmet transport to invalidate an NVME
3645 * host to which this target instance had active connections.
3646 */
3647void
3648lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3649{
3650        u32 ndlp_has_hh;
3651        struct lpfc_nvmet_tgtport *tgtp;
3652
3653        lpfc_printf_log(phba, KERN_INFO,
3654                        LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
3655                        "6203 Invalidating hosthandle x%px\n",
3656                        ndlp);
3657
3658        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3659        atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
3660
3661        spin_lock_irq(&ndlp->lock);
3662        ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
3663        spin_unlock_irq(&ndlp->lock);
3664
3665        /* Do not invalidate any nodes that do not have a hosthandle.
3666         * The host_release callback will cause a node reference
3667         * count imbalance and a crash.
3668         */
3669        if (!ndlp_has_hh) {
3670                lpfc_printf_log(phba, KERN_INFO,
3671                                LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
3672                                "6204 Skip invalidate on node x%px DID x%x\n",
3673                                ndlp, ndlp->nlp_DID);
3674                return;
3675        }
3676
3677#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
3678        /* Need to get the nvmet_fc_target_port pointer here. */
3679        nvmet_fc_invalidate_host(phba->targetport, ndlp);
3680#endif
3681}
3682