/*
 * NVMe over Fibre Channel (FC-NVMe) initiator support for the Emulex
 * lpfc driver: localport/remoteport management and the LS and FCP
 * request paths registered with the kernel nvme-fc transport.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

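/**
 * lpfc_nvme_create_queue - Create and bind an NVME hardware queue handle
 * @pnvme_lport: Transport localport the queue is created for.
 * @qidx: NVME queue index (0 is the admin queue).
 * @qsize: Requested queue size (unused by this driver).
 * @handle: Returned opaque queue handle for follow-up calls.
 *
 * Allocates a qhandle that maps the transport queue index onto one of
 * the driver's hardware queues.
 *
 * Return: 0 on success, -ENOMEM or -ENODEV on failure.
 */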
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	if (!vport || vport->load_flag & FC_UNLOADING ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = raw_smp_processor_id();
	qhandle->qidx = qidx;

	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue use the MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that, just round robin.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			lpfc_nvme_template.max_hw_queues);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d (cpu %d) to "
			 "hdw_queue %d qhandle x%px\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}

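/**
 * lpfc_nvme_delete_queue - Release an NVME hardware queue handle
 * @pnvme_lport: Transport localport that owns the queue.
 * @qidx: NVME queue index being deleted.
 * @handle: Queue handle returned by lpfc_nvme_create_queue().
 *
 * Frees the qhandle allocated when the queue was created.
 */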
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
			 lport, qidx, handle);
	kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport x%px delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	if (lport->vport->localport)
		complete(lport->lport_unreg_cmp);
}

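/**
 * lpfc_nvme_remoteport_delete - Transport callback when a remoteport is freed
 * @remoteport: the remoteport being deleted.
 *
 * Clears the NVME unregister-wait state on the node and, if no FC4
 * transport registration remains, kicks the discovery state machine to
 * remove the node.
 */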
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	u32 fc4_xpt_flags;

	ndlp = rport->ndlp;
	if (!ndlp) {
		pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n",
		       __func__, rport, remoteport);
		goto rport_err;
	}

	vport = ndlp->vport;
	if (!vport) {
		pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n",
		       __func__, ndlp, ndlp->nlp_state, rport);
		goto rport_err;
	}

	fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling the state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6146 remoteport delete of remoteport x%px, ndlp x%px "
			 "DID x%x xflags x%x\n",
			 remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);
	spin_lock_irq(&ndlp->lock);

	/* The register rebind might have occurred before the delete
	 * downcall.  Guard against this race.
	 */
	if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
		ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);

	spin_unlock_irq(&ndlp->lock);

	/* Only remove the node if both the NVME and SCSI transport
	 * registrations are now gone.
	 */
	if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

 rport_err:
	return;
}

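/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request.
 *
 * Validates the exchange bindings and hands the received LS to the
 * nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * Return: 0 if the LS was handled, 1 if it should be dropped.
 */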
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
		       struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_vport *vport;
	struct lpfc_nvme_rport *lpfc_rport;
	struct nvme_fc_remote_port *remoteport;
	struct lpfc_nvme_lport *lport;
	uint32_t *payload = axchg->payload;
	int rc;

	vport = axchg->ndlp->vport;
	lpfc_rport = axchg->ndlp->nrport;
	if (!lpfc_rport)
		return -EINVAL;

	remoteport = lpfc_rport->remoteport;
	if (!vport->localport ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -EINVAL;

	lport = vport->localport->private;
	if (!lport)
		return -EINVAL;

	rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
				axchg->size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n",
			axchg->size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (!rc)
		return 0;
#endif
	return 1;
}

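/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for an NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @vport: pointer to the vport the LS was issued on.
 * @cmdwqe: the WQE that carried the LS request.
 * @wcqe: the completion queue entry for the request.
 *
 * Releases the BPL and node reference held by the request, invokes the
 * transport's done() callback with the completion status, and releases
 * the iocbq.
 */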
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
		       struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;
	uint32_t status;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
			 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
			 "ndlp:x%px\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6046 NVMEx cmpl without done call back? "
				 "Data x%px DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		     struct lpfc_iocbq *rspwqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	__lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	/* Initialize only 64 bytes */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->cmd_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	if (!genwqe->context1) {
		dev_warn(&phba->pcidev->dev,
			 "Warning: Failed node ref, not sending LS_REQ\n");
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload specified by CommandData and ResponseData */
	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->num_bdes = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	/* Issue GEN REQ WQE for NPORT <did> */
	genwqe->cmd_cmpl = cmpl;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x rc x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state, rc);
		lpfc_nlp_put(ndlp);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
			 "bmp:x%px xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->sli4_xritag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	return 0;
}

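/**
 * __lpfc_nvme_ls_req - Issue an NVME LS request on behalf of the transport
 * @vport: the vport to issue the LS on.
 * @ndlp: the remote node the LS is destined for.
 * @pnvme_lsreq: the transport LS request structure.
 * @gen_req_cmp: completion handler to call when the exchange completes.
 *
 * Builds a two-entry BPL covering the request and response buffers and
 * submits a GEN_REQUEST64 WQE via lpfc_nvme_gen_req().
 *
 * Return: 0 on success, negative errno on failure.
 */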
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   struct nvmefc_ls_req *pnvme_lsreq,
		   void (*gen_req_cmp)(struct lpfc_hba *phba,
				       struct lpfc_iocbq *cmdwqe,
				       struct lpfc_iocbq *rspwqe))
{
	struct lpfc_dmabuf *bmp;
	struct ulp_bde64 *bpl;
	int ret;
	uint16_t ntype, nstate;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
				 "LS Req\n",
				 ndlp);
		return -ENODEV;
	}

	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6088 NVMEx LS REQ: Fail DID x%06x not "
				 "ready for IO. Type x%x, State x%x\n",
				 ndlp->nlp_DID, ntype, nstate);
		return -ENODEV;
	}
	if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	if (!vport->phba->sli4_hba.nvmels_wq)
		return -ENOMEM;

	/*
	 * The transport supplies two DMA buffers: the request and the
	 * response.  They only need to be wrapped in a BPL before calling
	 * lpfc_nvme_gen_req; on free only the wrapper is released because
	 * the nvme layer owns the data buffers.  The driver does not look
	 * at the request or response payloads - all of that handling
	 * happens in the nvme-fc layer.
	 */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6044 NVMEx LS REQ: Could not alloc LS buf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		return -ENOMEM;
	}

	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6042 NVMEx LS REQ: Could not alloc mbuf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		kfree(bmp);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
			 "rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, gen_req_cmp, ndlp, 2,
				pnvme_lsreq->timeout, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
				 "lsreq x%px Status %x DID %x\n",
				 pnvme_lsreq, ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}

	return 0;
}

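/**
 * lpfc_nvme_ls_req - Transport entry point for issuing an NVME LS request
 * @pnvme_lport: Transport localport issuing the LS.
 * @pnvme_rport: Transport remoteport the LS is destined for.
 * @pnvme_lsreq: the transport LS request structure.
 *
 * Validates the lport/rport bindings and forwards the request to
 * __lpfc_nvme_ls_req() with the driver's LS completion handler.
 *
 * Return: 0 on success, negative errno on failure.
 */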
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;
	if (vport->load_flag & FC_UNLOADING ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	atomic_inc(&lport->fc4NvmeLsRequests);

	ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
				 lpfc_nvme_ls_req_cmp);
	if (ret)
		atomic_inc(&lport->xmt_ls_err);

	return ret;
}

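/**
 * __lpfc_nvme_ls_abort - Abort an outstanding NVME LS request
 * @vport: the vport the LS was issued on.
 * @ndlp: the remote node the LS was destined for.
 * @pnvme_lsreq: the transport LS request to abort.
 *
 * Searches the NVME LS txcmplq for the WQE carrying @pnvme_lsreq and,
 * if found, issues an abort for it.
 *
 * Return: 0 if an abort was issued, -EINVAL if the request was not found.
 */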
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;
	bool foundit = false;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
				 "x%06x, Failing LS Req\n",
				 ndlp, ndlp ? ndlp->nlp_DID : 0);
		return -EINVAL;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
			 "x%px rqstlen:%d rsplen:%d %pad %pad\n",
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the NVME LS ring txcmplq and look for the wqe that matches
	 * this LS request.  If found, issue an abort on the wqe.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		if (wqe->context2 == pnvme_lsreq) {
			wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
			foundit = true;
			break;
		}
	}
	spin_unlock(&pring->ring_lock);

	if (foundit)
		lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
	spin_unlock_irq(&phba->hbalock);

	if (foundit)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
			 pnvme_lsreq);
	return -EINVAL;
}

static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
		     struct nvme_fc_remote_port *remoteport,
		     struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvme_lport *lport;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lport = (struct lpfc_nvme_lport *)localport->private;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);

	if (rc) {
		/*
		 * Unless the failure is due to having already sent the
		 * response, an abort is generated for the exchange when
		 * the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&lport->xmt_ls_abort);
		return rc;
	}

	return 0;
}

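/**
 * lpfc_nvme_ls_abort - Transport entry point to abort an NVME LS request
 * @pnvme_lport: Transport localport that issued the LS.
 * @pnvme_rport: Transport remoteport the LS was destined for.
 * @pnvme_lsreq: the transport LS request to abort.
 */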
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

	ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lport->xmt_ls_abort);
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */
	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  Use the nvme command's cmd and rsp dma
	 * addresses supplied by the transport.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Words 0 - 2 : The command is carried as immediate data */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;	/* Word 16 */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];		/* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
		dptr++;				/* Skip Word 0 in payload */

		*wptr++ = *dptr++;		/* Word 1 */
		*wptr++ = *dptr++;		/* Word 2 */
		*wptr++ = *dptr++;		/* Word 3 */
		*wptr++ = *dptr++;		/* Word 4 */
		dptr++;				/* Skip Word 5 in payload */
		*wptr++ = *dptr++;		/* Word 6 */
		*wptr++ = *dptr++;		/* Word 7 */
		dptr += 8;			/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;		/* Word 16 */
		*wptr++ = *dptr++;		/* Word 17 */
		*wptr++ = *dptr++;		/* Word 18 */
		*wptr++ = *dptr++;		/* Word 19 */
		*wptr++ = *dptr++;		/* Word 20 */
		*wptr++ = *dptr++;		/* Word 21 */
		*wptr++ = *dptr++;		/* Word 22 */
		*wptr = *dptr;			/* Word 23 */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Words 0 - 2 : First SGE is the NVME CMND IU BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}

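/**
 * lpfc_nvme_io_cmd_cmpl - Completion handler for an NVME FCP command
 * @phba: pointer to lpfc hba data structure.
 * @pwqeIn: the command WQE that completed.
 * @pwqeOut: the WQE carrying the completion status.
 *
 * Translates the SLI-4 completion into the nvme-fc request status,
 * rebuilding the ERSP IU when the CQE code indicates one, then calls
 * the transport's done() callback and releases the IO buffer.
 */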
static void
lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      struct lpfc_iocbq *pwqeOut)
{
	struct lpfc_io_buf *lpfc_ncmd =
		(struct lpfc_io_buf *)pwqeIn->context1;
	struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;
	uint32_t lat;
	bool call_done = false;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int cpu;
#endif
	int offline = 0;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6071 Null lpfc_ncmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_ncmd->buf_lock);

	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
				 "nvmeCmd x%px\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

	if (unlikely(status && vport->localport)) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6062 Ignoring NVME cmpl. No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get the Command Id from the cmd to plug into the
		 * response.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;

			/* A full ERSP-length rsp is really a success case */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				lpfc_ncmd->result = 0;

				lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
					"6084 NVME Completion ERSP: "
					"xri %x placed x%x\n",
					lpfc_ncmd->cur_iocbq.sli4_xritag,
					wcqe->total_data_placed);
				break;
			}
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
					LOG_NVME_IOERR,
					"6032 Delay Aborted cmd x%px "
					"nvme cmd x%px, xri x%x, "
					"xb %d\n",
					lpfc_ncmd, nCmd,
					lpfc_ncmd->cur_iocbq.sli4_xritag,
					bf_get(lpfc_wcqe_c_xb, wcqe));
			fallthrough;
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x [x%x] "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->parameter,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
			offline = pci_channel_offline(vport->phba->pcidev);
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline)
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the command to the upper layer */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_ncmd);
	}
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (lpfc_ncmd->cpu != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 cpu, lpfc_ncmd->cpu);
	}
#endif

	/* The done() upcall is held off while the exchange remains busy
	 * on the adapter; the buffer is released to the abort list instead.
	 */
	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
		call_done = true;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

	/* Check if IO qualified for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    nCmd->io_dir == NVMEFC_FCP_READ &&
	    nCmd->payload_length) {
		/* Used when calculating average latency */
		lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
		lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
	}

	if (call_done)
		nCmd->done(nCmd);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

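/**
 * lpfc_nvme_prep_io_cmd - Initialize the WQE for an NVME FCP command
 * @vport: the vport issuing the IO.
 * @lpfc_ncmd: the driver IO buffer for the command.
 * @pnode: the remote node the IO is destined for.
 * @cstat: per-queue FC4 statistics to update.
 *
 * Copies the appropriate iread/iwrite/icmnd WQE template based on the
 * IO direction and fills in the request-independent WQE fields.
 *
 * Return: 0 always.
 */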
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			/* For a CMF Managed port, iod must be zeroed */
			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
				       LPFC_WQE_IOD_NONE);
			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}

	if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);

	/*
	 * Finish initializing those WQE fields that are independent
	 * of the request buffer.
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 10 */
	bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

	/* Words 13 14 15 are for PBDE support */

	pwqeq->vport = vport;
	return 0;
}

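/**
 * lpfc_nvme_prep_io_dma - Build the SGL for an NVME FCP command
 * @vport: the vport issuing the IO.
 * @lpfc_ncmd: the driver IO buffer for the command.
 *
 * Formats the data scatter-gather list behind the cmd/rsp SGEs,
 * chaining extra SGL pages (LSP entries) when the segment count
 * crosses an SGL page boundary, and sets up the optional PBDE for
 * single-segment IOs.
 *
 * Return: 0 on success, 1 on failure.
 */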
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr = 0;
	uint32_t dma_len = 0;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	bool lsp_just_set = false;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * Setup the sgl.  sg_cnt is nonzero when there is data to
	 * transfer; the transport has already DMA-mapped the list.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp sges.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6058 Too many sg segments from "
					"NVME Transport. Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single command.  Just run through the seg_cnt and
		 * format the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;

		/* for tracking the segment boundaries */
		j = 2;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}

			sgl->word2 = 0;
			if (nseg == 1) {
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
						phba, lpfc_ncmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_ncmd->seg_cnt = 0;
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
			      LPFC_SGE_TYPE_LSP)) {
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = data_sg->dma_address;
				dma_len = data_sg->length;
				sgl->addr_lo = cpu_to_le32(
					putPaddrLow(physaddr));
				sgl->addr_hi = cpu_to_le32(
					putPaddrHigh(physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				data_sg = sg_next(data_sg);

				sgl++;

				lsp_just_set = false;
			} else {
				sgl->word2 = cpu_to_le32(sgl->word2);

				sgl->sge_len = cpu_to_le32(
					phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}

		/* PBDE support for first data SGE only */
		if (nseg == 1 && phba->cfg_enable_pbde) {
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

			/* Word 11 - set PBDE bit */
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			/* Word 11 - PBDE bit disabled by default template */
		}

	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}

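/**
 * lpfc_nvme_fcp_io_submit - Transport entry point to issue an NVME FCP IO
 * @pnvme_lport: Transport localport issuing the IO.
 * @pnvme_rport: Transport remoteport the IO is destined for.
 * @hw_queue_handle: Queue handle from lpfc_nvme_create_queue().
 * @pnvme_fcreq: the transport FCP request.
 *
 * Validates the node state, allocates a driver IO buffer, prepares the
 * WQE and SGL, and posts the command to the selected hardware queue.
 *
 * Return: 0 on success, negative errno on failure.
 */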
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx, cpu;
	struct lpfc_nvme_lport *lport;
	struct lpfc_fc4_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
	uint64_t start = 0;

	/* Validate pointers. LLDD fault handling with transport during
	 * unloading can cause NULL pointers.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
	    phba->hba_flag & HBA_IOQ_FLUSH) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Busy IO, ndlp not ready: rport x%px "
				 "ndlp x%px, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->fc4_xpt_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;

	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0.
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}

	/* Check if IO qualifies for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
	    pnvme_fcreq->payload_length) {
		ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
		if (ret) {
			ret = -EBUSY;
			goto out_fail;
		}
		/* Used when calculating average latency */
		start = ktime_get_ns();
	}

	/* The node is shared with FCP IO, so make sure the IO pending count
	 * does not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail1;
		}
	}

	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = raw_smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail1;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif
	lpfc_ncmd->rx_cmd_start = start;

	/*
	 * Cross-link the nvme buffer and the request private area so the
	 * IO completion and abort handlers can find their way back to
	 * each other.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->qidx = lpfc_queue_info->qidx;

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifies the WQ that matches the MSI-X vector/EQ/CQ the
	 * completion will arrive on.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6113 Fail IO, Could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		lpfc_ncmd->cpu = cpu;
		if (idx != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6702 CPU Check cmd: "
					 "cpu %d wq %d\n",
					 lpfc_ncmd->cpu,
					 lpfc_queue_info->index);
	}
#endif
	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			cstat->output_requests--;
		else
			cstat->input_requests--;
	} else
		cstat->control_requests--;
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail1:
	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
			     pnvme_fcreq->payload_length, NULL);
 out_fail:
	return ret;
}

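/**
 * lpfc_nvme_abort_fcreq_cmpl - Completion handler for an NVME FCP abort
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: the ABORT WQE that completed.
 * @abts_cmpl: the completion queue entry for the abort.
 *
 * Logs the abort completion and releases the abort iocbq.
 */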
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com),
			get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

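/**
 * lpfc_nvme_fcp_abort - Transport entry point to abort an NVME FCP IO
 * @pnvme_lport: Transport localport that issued the IO.
 * @pnvme_rport: Transport remoteport the IO was destined for.
 * @hw_queue_handle: Queue handle from lpfc_nvme_create_queue().
 * @pnvme_fcreq: the transport FCP request to abort.
 *
 * Locates the driver IO buffer bound to @pnvme_fcreq and, after sanity
 * checks against races with completion, issues an abort iotag for the
 * outstanding exchange.
 */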
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_io_buf *lpfc_nbuf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	unsigned long flags;
	int ret_val;

	/* Validate pointers. LLDD fault handling with transport during
	 * unloading can cause NULL pointers.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
				 "6129 Fail Abort, HW Queue Handle NULL.\n");
		return;
	}

	phba = vport->phba;
	freqpriv = pnvme_fcreq->private;

	if (unlikely(!freqpriv))
		return;
	if (vport->load_flag & FC_UNLOADING)
		return;

	/* Announce entry to the abort path. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req x%px\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/* If the hba is getting reset, driver queued commands are in the
	 * process of being flushed.  Skip the abort.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);

	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now. hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer. Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer. Skipping abort req.\n");
		return;
	}
	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/* Guard against IO completion being called at same time */
	spin_lock(&lpfc_nbuf->buf_lock);

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport.  If they don't match, it is likely the driver
	 * has already completed the nvme_fcreq and the nvme transport
	 * has not seen it yet.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf x%px nvmeCmd x%px, "
				 "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6142 NVME IO req x%px not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	atomic_inc(&lport->xmt_fcp_abort);
	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort request. */
	if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq x%px, "
				 "lpfc_ncmd x%px xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
					      lpfc_nvme_abort_fcreq_cmpl);

	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	if (ret_val != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq x%px.\n",
				 ret_val, pnvme_fcreq);
		return;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x\n",
			 nvmereq_wqe->sli4_xritag);
	return;

out_unlock:
	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return;
}

/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req = lpfc_nvme_ls_req,
	.fcp_io = lpfc_nvme_fcp_io_submit,
	.ls_abort = lpfc_nvme_ls_abort,
	.fcp_abort = lpfc_nvme_fcp_abort,
	.xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures.
	 * No use for the last two sizes at this time.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};

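/**
 * lpfc_get_nvme_buf - Allocate a driver IO buffer for an NVME command
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: the remote node the IO is destined for.
 * @idx: hardware queue index to allocate from.
 * @expedite: nonzero to allow dipping into the expedite pool.
 *
 * Return: pointer to an initialized lpfc_io_buf, or NULL if the pool
 * for the hardware queue is empty.
 */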
static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;

	lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);

	if (lpfc_ncmd) {
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = &pwqeq->wqe;

		/* Setup key fields in buffer that may have been changed
		 * if other protocols used this buffer.
		 */
		pwqeq->cmd_flag = LPFC_IO_NVME;
		pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
		lpfc_ncmd->start_time = jiffies;
		lpfc_ncmd->flags = 0;

		/* Mark the first SGE as a SKIP entry for now; the real
		 * cmd/rsp SGEs are filled in when the IO is prepped.
		 */
		sgl = lpfc_ncmd->dma_sgl;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
			atomic_inc(&ndlp->cmd_pending);
			lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
		}

	} else {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
	}

	return lpfc_ncmd;
}

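/**
 * lpfc_release_nvme_buf - Return a driver IO buffer to its pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: the IO buffer being released.
 *
 * If the exchange is still busy on the adapter (XB set), the buffer is
 * parked on the aborted IO list until the adapter releases the XRI;
 * otherwise it is returned to the free pool immediately.
 */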
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;

	qp = lpfc_ncmd->hdwq;
	if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
				"ox_id x%x on reqtag x%x\n",
				lpfc_ncmd->cur_iocbq.sli4_xritag,
				lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_abts_io_buf_list);
		qp->abts_nvme_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}

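/**
 * lpfc_nvme_create_localport - Register the vport as an nvme-fc localport
 * @vport: the lpfc vport to bind to the NVME transport.
 *
 * Builds the port info from the vport WWNs and registers the vport
 * with the nvme-fc transport, initializing the lport statistics on
 * success.
 *
 * Return: 0 on success, negative errno on failure.
 */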
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	/* Initialize this localport instance.  The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Advertise the configured segment count + 1 to the transport
	 * layer so it can account for SGL page alignment.
	 */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;

	/* Advertise how many hw queues we support based on cfg_hdw_queue,
	 * which will not exceed the cpu count.
	 */
	lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	/* The registration call allocates heap memory for the localport
	 * as well as the private area.
	 */
	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev, &localport);
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP x%px, private "
				 "x%px, sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* Private is our lport size declared in the template. */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		vport->nvmei_support = 1;

		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);

		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);
	}

	return ret;
}

#if (IS_ENABLED(CONFIG_NVME_FC))

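/**
 * lpfc_nvme_lport_unreg_wait - Wait for a localport unregister to complete
 * @vport: the vport being unregistered.
 * @lport: the lport private data for the localport.
 * @lport_unreg_cmp: completion signaled by the localport_delete callback.
 *
 * Waits in LPFC_NVME_WAIT_TMO intervals, logging the outstanding IO
 * counts each time the wait expires, until the transport signals that
 * the unregister has completed.
 */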
static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport,
			   struct completion *lport_unreg_cmp)
{
	u32 wait_tmo;
	int ret, i, pending = 0;
	struct lpfc_sli_ring *pring;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli4_hdw_queue *qp;
	int abts_scsi, abts_nvme;

	/* The host transport has to clean up and confirm, requiring an
	 * indefinite wait.  Print a message each time the wait expires
	 * and renew it.
	 */
	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
	while (true) {
		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
		if (unlikely(!ret)) {
			pending = 0;
			abts_scsi = 0;
			abts_nvme = 0;
			for (i = 0; i < phba->cfg_hdw_queue; i++) {
				qp = &phba->sli4_hba.hdwq[i];
				if (!vport->localport || !qp || !qp->io_wq)
					return;

				pring = qp->io_wq->pring;
				if (!pring)
					continue;
				pending += pring->txcmplq_cnt;
				abts_scsi += qp->abts_scsi_io_bufs;
				abts_nvme += qp->abts_nvme_io_bufs;
			}
			if (!vport->localport ||
			    test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
			    vport->load_flag & FC_UNLOADING)
				return;

			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6176 Lport x%px Localport x%px wait "
					 "timed out. Pending %d [%d:%d]. "
					 "Renewing.\n",
					 lport, vport->localport, pending,
					 abts_scsi, abts_nvme);
			continue;
		}
		break;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
			 "6177 Lport x%px Localport x%px Complete Success\n",
			 lport, vport->localport);
}
#endif

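/**
 * lpfc_nvme_destroy_localport - Unregister the vport's nvme-fc localport
 * @vport: the lpfc vport being torn down.
 *
 * Unregisters the localport from the nvme-fc transport and blocks
 * until the transport's delete callback completes.
 */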
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	int ret;
	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	if (!localport)
		return;
	lport = (struct lpfc_nvme_lport *)localport->private;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport x%px\n",
			 localport);

	/* lport's rport list is clear.  Unregister
	 * lport and release resources.
	 */
	lport->lport_unreg_cmp = &lport_unreg_cmp;
	ret = nvme_fc_unregister_localport(localport);

	/* Wait for completion.  This either blocks
	 * indefinitely or succeeds.
	 */
	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
	vport->localport = NULL;

	/* Regardless of the unregister outcome, the driver is done with
	 * this localport instance.
	 */
	vport->nvmei_support = 0;
	if (ret == 0) {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	} else {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
	}
#endif
}

void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP x%px, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport x%px did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport x%px to DID x%06x\n",
			 lport, localport->port_id);
#endif
}

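/**
 * lpfc_nvme_register_port - Register a remote node as an nvme-fc remoteport
 * @vport: the lpfc vport the node belongs to.
 * @ndlp: the remote node to register.
 *
 * Return: 0 on success, negative errno on failure.
 */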
2311int
2312lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2313{
2314#if (IS_ENABLED(CONFIG_NVME_FC))
2315 int ret = 0;
2316 struct nvme_fc_local_port *localport;
2317 struct lpfc_nvme_lport *lport;
2318 struct lpfc_nvme_rport *rport;
2319 struct lpfc_nvme_rport *oldrport;
2320 struct nvme_fc_remote_port *remote_port;
2321 struct nvme_fc_port_info rpinfo;
2322 struct lpfc_nodelist *prev_ndlp = NULL;
2323 struct fc_rport *srport = ndlp->rport;
2324
2325 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2326 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2327 ndlp->nlp_DID, ndlp->nlp_type);
2328
2329 localport = vport->localport;
2330 if (!localport)
2331 return 0;
2332
2333 lport = (struct lpfc_nvme_lport *)localport->private;
2334
2335
2336
2337
2338
2339
2340
2341 memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2342 rpinfo.port_id = ndlp->nlp_DID;
2343 if (ndlp->nlp_type & NLP_NVME_TARGET)
2344 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2345 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2346 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2347
2348 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2349 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2350
2351 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2352 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2353 if (srport)
2354 rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
2355 else
2356 rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
2357
2358 spin_lock_irq(&ndlp->lock);
2359 oldrport = lpfc_ndlp_get_nrport(ndlp);
2360 if (oldrport) {
2361 prev_ndlp = oldrport->ndlp;
2362 spin_unlock_irq(&ndlp->lock);
2363 } else {
2364 spin_unlock_irq(&ndlp->lock);
2365 if (!lpfc_nlp_get(ndlp)) {
2366 dev_warn(&vport->phba->pcidev->dev,
2367 "Warning - No node ref - exit register\n");
2368 return 0;
2369 }
2370 }
2371
2372 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2373 if (!ret) {
2374
2375
2376
2377
2378
2379
2380
2381 spin_lock_irq(&ndlp->lock);
2382 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
2383 ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
2384 spin_unlock_irq(&ndlp->lock);
2385 rport = remote_port->private;
2386 if (oldrport) {
2387
2388
2389
2390
2391
2392 spin_lock_irq(&ndlp->lock);
2393 ndlp->nrport = NULL;
2394 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
2395 spin_unlock_irq(&ndlp->lock);
2396 rport->ndlp = NULL;
2397 rport->remoteport = NULL;
2398
2399
2400
2401
2402
2403 if (prev_ndlp && prev_ndlp != ndlp) {
2404 if (!prev_ndlp->nrport)
2405 lpfc_nlp_put(prev_ndlp);
2406 }
2407 }
2408
2409
2410 rport->remoteport = remote_port;
2411 rport->lport = lport;
2412 rport->ndlp = ndlp;
2413 spin_lock_irq(&ndlp->lock);
2414 ndlp->nrport = rport;
2415 spin_unlock_irq(&ndlp->lock);
2416 lpfc_printf_vlog(vport, KERN_INFO,
2417 LOG_NVME_DISC | LOG_NODE,
2418 "6022 Bind lport x%px to remoteport x%px "
2419 "rport x%px WWNN 0x%llx, "
2420 "Rport WWPN 0x%llx DID "
2421 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
2422 lport, remote_port, rport,
2423 rpinfo.node_name, rpinfo.port_name,
2424 rpinfo.port_id, rpinfo.port_role,
2425 ndlp, prev_ndlp);
2426 } else {
2427 lpfc_printf_vlog(vport, KERN_ERR,
2428 LOG_TRACE_EVENT,
2429 "6031 RemotePort Registration failed "
2430 "err: %d, DID x%06x\n",
2431 ret, ndlp->nlp_DID);
2432 }
2433
2434 return ret;
2435#else
2436 return 0;
2437#endif
2438}
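
/*
 * Illustrative sketch (not driver code): the path above amounts to
 * handing nvme_fc_register_remoteport() a filled-in nvme_fc_port_info.
 * A minimal equivalent for a hypothetical target-only node would be:
 *
 *	struct nvme_fc_port_info info = {
 *		.port_id      = ndlp->nlp_DID,
 *		.port_role    = FC_PORT_ROLE_NVME_TARGET,
 *		.port_name    = wwn_to_u64(ndlp->nlp_portname.u.wwn),
 *		.node_name    = wwn_to_u64(ndlp->nlp_nodename.u.wwn),
 *		.dev_loss_tmo = vport->cfg_devloss_tmo,
 *	};
 *	ret = nvme_fc_register_remoteport(vport->localport, &info, &rp);
 *
 * On success the transport allocates the remote port plus the private
 * lpfc_nvme_rport area that the code above binds back to the ndlp.
 */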

/**
 * lpfc_nvme_rescan_port - Request a rescan of a discovery-capable rport
 * @vport: The lpfc vport the node belongs to.
 * @ndlp: The node whose NVME remote port should be rescanned.
 *
 * If the node is bound to an NVME remoteport that advertises the
 * discovery role and has reached MAPPED state, ask the nvme-fc
 * transport to rescan it; otherwise log and skip.
 */
void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_nvme_rport *nrport;
	struct nvme_fc_remote_port *remoteport = NULL;

	spin_lock_irq(&ndlp->lock);
	nrport = lpfc_ndlp_get_nrport(ndlp);
	if (nrport)
		remoteport = nrport->remoteport;
	spin_unlock_irq(&ndlp->lock);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6170 Rescan NPort DID x%06x type x%x "
			 "state x%x nrport x%px remoteport x%px\n",
			 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
			 nrport, remoteport);

	if (!nrport || !remoteport)
		goto rescan_exit;

	/* Rescan only remote ports that present the discovery role
	 * and have completed discovery (MAPPED state).
	 */
	if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
	    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		nvme_fc_rescan_remoteport(remoteport);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6172 NVME rescanned DID x%06x "
				 "port_state x%x\n",
				 ndlp->nlp_DID, remoteport->port_state);
	}
	return;
 rescan_exit:
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6169 Skip NVME Rport Rescan, NVME remoteport "
			 "unregistered\n");
#endif
}
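
/*
 * Illustrative note (not driver code): nvme_fc_rescan_remoteport() only
 * requests a rescan from the nvme-fc transport; it does not block.  A
 * hypothetical caller reacting to an RSCN could simply do:
 *
 *	if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
 *		lpfc_nvme_rescan_port(vport, ndlp);
 *
 * and rely on the transport to rediscover namespaces asynchronously.
 */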

/**
 * lpfc_nvme_unregister_port - Unregister a remote port from the NVME transport
 * @vport: The lpfc vport the node belongs to.
 * @ndlp: The node whose NVME remote port is being unregistered.
 *
 * Tears down the ndlp's binding to its nvme-fc remoteport.  On the
 * driver unload path the remoteport's dev_loss_tmo is forced to zero
 * first so outstanding I/O is failed immediately rather than held for
 * the devloss window.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport = NULL;

	localport = vport->localport;

	/* The localport is expected to exist until driver unload;
	 * if it is already gone there is nothing left to unregister.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	spin_lock_irq(&ndlp->lock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&ndlp->lock);
	if (!remoteport)
		goto input_err;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport x%px, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x "
			 "refcnt %d\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type, kref_read(&ndlp->kref));

	/* Sanity check the node type.  Only NVME target nodes are
	 * unregistered here; rport state is not cleared until the
	 * transport calls back.
	 */
	if (ndlp->nlp_type & NLP_NVME_TARGET) {
		/* Mark the node as waiting on the transport's
		 * unregister callback.
		 */
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
		spin_unlock_irq(&vport->phba->hbalock);

		/* When the vport is unloading there is no recovery, so
		 * zero the devloss timer to fail outstanding I/O
		 * immediately.  The return value is ignored; the upcall
		 * is a courtesy to the transport.
		 */
		if (vport->load_flag & FC_UNLOADING)
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);

		/* The registration teardown is now asynchronous, so the
		 * driver no longer knows whether the nrport memory stays
		 * valid.  Break the binding and drop the node reference
		 * it held.
		 */
		ndlp->nrport = NULL;
		lpfc_nlp_put(ndlp);
		if (ret != 0) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);
		}
	}
	return;

 input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "6168 State error: lport x%px, rport x%px FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}
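
/*
 * Illustrative note (not driver code): forcing dev_loss_tmo to zero
 * before the unregister makes the transport fail outstanding I/O
 * immediately instead of holding the remoteport for the devloss window:
 *
 *	nvme_fc_set_remoteport_devloss(remoteport, 0);
 *	nvme_fc_unregister_remoteport(remoteport);
 *
 * That ordering is only appropriate on the unload path above, where no
 * recovery of the remote port is expected.
 */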

/**
 * lpfc_sli4_nvme_pci_offline_aborted - Complete an NVME IO aborted for PCI offline
 * @phba: Pointer to HBA context object.
 * @lpfc_ncmd: The IO buffer for the NVME command being aborted.
 *
 * Called when the PCI function is going offline and the exchange has
 * already been aborted.  Completes the upper-layer command with
 * NVME_SC_INTERNAL and releases the IO buffer.
 */
void
lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
				   struct lpfc_io_buf *lpfc_ncmd)
{
	struct nvmefc_fcp_req *nvme_cmd = NULL;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6533 %s nvme_cmd x%px tag x%x abort complete and "
			"xri released\n", __func__,
			lpfc_ncmd->nvmeCmd,
			lpfc_ncmd->cur_iocbq.iotag);

	/* Aborted NVME commands must not complete before the abort
	 * exchange fully completes.  Once it has, finish the command
	 * with an internal error and release the buffer.
	 */
	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->transferred_length = 0;
		nvme_cmd->rcv_rsplen = 0;
		nvme_cmd->status = NVME_SC_INTERNAL;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process for aborted NVME xris
 * @phba: Pointer to HBA context object.
 * @axri: Pointer to the XRI-aborted WCQE.
 * @lpfc_ncmd: The IO buffer for the NVME command being aborted.
 *
 * Invoked when an SLI4 fast-path NVME XRI abort completes.  Runs any
 * node-level ABTS error handling, completes the upper-layer command,
 * and releases the IO buffer back to its pool.
 */
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri,
			   struct lpfc_io_buf *lpfc_ncmd)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct nvmefc_fcp_req *nvme_cmd = NULL;
	struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;

	if (ndlp)
		lpfc_sli4_abts_err_handler(phba, ndlp, axri);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6311 nvme_cmd x%px xri x%x tag x%x abort complete "
			"and xri released\n",
			lpfc_ncmd->nvmeCmd, xri,
			lpfc_ncmd->cur_iocbq.iotag);

	/* Aborted NVME commands must not complete before the abort
	 * exchange fully completes.  Once it has, complete the command
	 * and release the buffer.
	 */
	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * Polls the txcmplq of every hardware queue until all outstanding NVME
 * WQEs have completed, sleeping between checks and logging periodically
 * if the drain stalls.  Used on reset and offline paths before hardware
 * resources are torn down.
 */
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	u32 i, wait_cnt = 0;

	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
		return;

	/* Cycle through all IO rings and make sure all outstanding
	 * WQEs have completed.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		if (!phba->sli4_hba.hdwq[i].io_wq)
			continue;
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;

		if (!pring)
			continue;

		/* Wait here for any IO still on the completion queue. */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_cnt++;

			/* Warn every 1000 polls so a queue that fails
			 * to drain is visible in the log.
			 */
			if ((wait_cnt % 1000) == 0) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6178 NVME IO not empty, "
						"cnt %d\n", wait_cnt);
			}
		}
	}

	/* Make sure the HBA is still alive. */
	lpfc_issue_hb_tmo(phba);
}
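
/*
 * Illustrative sketch (not driver code): the drain above is a plain
 * poll-and-sleep loop per hardware queue:
 *
 *	while (!list_empty(&pring->txcmplq))
 *		msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
 *
 * i.e. sleep in small increments until the hardware has returned every
 * outstanding exchange, warning every 1000 polls so a wedged queue is
 * visible rather than silently hanging the reset path.
 */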

void
lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      uint32_t stat, uint32_t param)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_io_buf *lpfc_ncmd;
	struct nvmefc_fcp_req *nCmd;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep = &wcqe;

	lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
	if (!lpfc_ncmd) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}

	/* For an abort iocb just release it; the IO iocb will make
	 * the done call.
	 */
	if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
	    CMD_ABORT_XRI_CX) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}

	spin_lock(&lpfc_ncmd->buf_lock);
	nCmd = lpfc_ncmd->nvmeCmd;
	if (!nCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
			"6194 NVME Cancel xri %x\n",
			lpfc_ncmd->cur_iocbq.sli4_xritag);

	/* Synthesize a WCQE carrying the caller's status/parameter. */
	wcqep->word0 = 0;
	bf_set(lpfc_wcqe_c_status, wcqep, stat);
	wcqep->parameter = param;
	wcqep->word3 = 0;

	/* If the SLI port is still active, mark the exchange busy (XB)
	 * so the XRI is recovered through the aborted-XRI path.
	 */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		bf_set(lpfc_wcqe_c_xb, wcqep, 1);

	memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
	(pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
#endif
}
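
/*
 * Illustrative note (not driver code): the cancel path completes the
 * command by synthesizing a WCQE instead of waiting for hardware.  A
 * hypothetical ring-flush caller would invoke it per IOCB, e.g.:
 *
 *	lpfc_nvme_cancel_iocb(phba, piocb,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN);
 *
 * The synthesized status and parameter then flow through the normal
 * completion handler exactly as a hardware-generated WCQE would.
 */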