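/*
 * lpfc_nvme.c -- NVME initiator-side bindings between the lpfc SLI-4
 * driver and the kernel nvme-fc transport.
 */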
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;
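
/**
 * lpfc_nvme_create_queue - Create a queue handle for the NVME transport
 * @pnvme_lport: Transport localport instance making the request.
 * @qidx: NVME queue index; 0 is the admin queue.
 * @qsize: Size of the queue in bytes.
 * @handle: Output; an opaque driver queue handle used in follow-up calls.
 *
 * The transport calls this routine once per queue so the driver can bind
 * @qidx to one of its hardware queues (MSI-X vector/EQ/CQ/WQ set).
 *
 * Return value :
 *   0 - Success
 *   -ENOMEM - Could not allocate the queue handle
 */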
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = raw_smp_processor_id();
	qhandle->qidx = qidx;

	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
				  lpfc_nvme_template.max_hw_queues);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d (cpu %d) to "
			 "hdw_queue %d qhandle x%px\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}
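
/**
 * lpfc_nvme_delete_queue - Delete a queue handle for the NVME transport
 * @pnvme_lport: Transport localport instance making the request.
 * @qidx: NVME queue index being deleted.
 * @handle: The opaque driver handle from lpfc_nvme_create_queue.
 *
 * The transport calls this routine so the driver can free any internal
 * state bound to @qidx.
 */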
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
			 lport, qidx, handle);
	kfree(handle);
}
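
/**
 * lpfc_nvme_localport_delete - Driver localport-delete callback
 * @localport: Transport localport instance being deleted.
 *
 * Invoked by the nvme-fc transport when the localport teardown is
 * complete; wakes any thread waiting in lpfc_nvme_destroy_localport.
 */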
static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport x%px delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	if (lport->vport->localport)
		complete(lport->lport_unreg_cmp);
}
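
/**
 * lpfc_nvme_remoteport_delete - Driver remoteport-delete callback
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall. The NVME transport calls this function
 * when it has completed the unregistration of a previously registered
 * remoteport.
 */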
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	u32 fc4_xpt_flags;

	ndlp = rport->ndlp;
	if (!ndlp) {
		pr_err("**** %s: NULL ndlp on rport %p remoteport %p\n",
		       __func__, rport, remoteport);
		goto rport_err;
	}

	vport = ndlp->vport;
	if (!vport) {
		pr_err("**** %s: Null vport on ndlp %p, ste x%x rport %p\n",
		       __func__, ndlp, ndlp->nlp_state, rport);
		goto rport_err;
	}

	fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;

	/* Remove this rport from the lport's list - memory is owned by
	 * the transport. Remove the ndlp reference for the NVME
	 * transport before calling the state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6146 remoteport delete of remoteport %p\n",
			 remoteport);
	spin_lock_irq(&ndlp->lock);

	/* The register rebind might have occurred before the delete
	 * downcall.  Guard against this race.
	 */
	if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)
		ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD);

	spin_unlock_irq(&ndlp->lock);

	/* On a devloss timeout event, one more put is executed provided
	 * the NVME and SCSI rport unregister requests are complete.  If
	 * the vport is unloading, this extra put is executed by
	 * lpfc_drop_node.
	 */
	if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

 rport_err:
	return;
}
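
/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to the exchange context for the NVME LS request.
 *
 * This routine processes an asynchronously received NVME LS request:
 * remaining validation is done and the LS is forwarded to the nvme-fc
 * transport via nvme_fc_rcv_ls_req().
 *
 * Returns 0 if the LS was handled and delivered to the transport.
 * Returns 1 if the LS failed to be handled and should be dropped.
 */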
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
		       struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_vport *vport;
	struct lpfc_nvme_rport *lpfc_rport;
	struct nvme_fc_remote_port *remoteport;
	struct lpfc_nvme_lport *lport;
	uint32_t *payload = axchg->payload;
	int rc;

	vport = axchg->ndlp->vport;
	lpfc_rport = axchg->ndlp->nrport;
	if (!lpfc_rport)
		return -EINVAL;

	remoteport = lpfc_rport->remoteport;
	if (!vport->localport)
		return -EINVAL;

	lport = vport->localport->private;
	if (!lport)
		return -EINVAL;

	rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
				axchg->size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n",
			axchg->size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (!rc)
		return 0;
#endif
	return 1;
}
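
/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for NVME LS requests
 * @phba: Pointer to HBA context object.
 * @vport: The local port the LS was issued on.
 * @cmdwqe: Pointer to the driver command WQE object.
 * @wcqe: Pointer to the driver response CQE object.
 *
 * Updates state, frees the BPL buffer, calls the transport's ls_req
 * done() routine, then tears down the command WQE used for the LS
 * request.
 */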
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
		       struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;
	uint32_t status;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 NVMEx LS REQ %px cmpl DID %x Xri: %x "
			 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
			 "ndlp:x%px\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6046 NVMEx cmpl without done call back? "
				 "Data %px DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

/* Completion handler for LS requests issued via lpfc_nvme_ls_req();
 * updates lport statistics then defers to the generic handler.
 */
static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	__lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}
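
/*
 * lpfc_nvme_gen_req - build and post a GEN_REQUEST64 WQE that carries an
 * NVME LS payload, described by the BPL in @bmp, to the node @ndlp.
 * Returns 0 on success, 1 on failure.
 */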
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	/* Initialize only 64 bytes */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	if (!genwqe->context1) {
		dev_warn(&phba->pcidev->dev,
			 "Warning: Failed node ref, not sending LS_REQ\n");
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	/* Issue GEN REQ WQE for NPORT <did> */
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x rc x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state, rc);
		lpfc_nlp_put(ndlp);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
			 "bmp:x%px xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->sli4_xritag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	return 0;
}
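
/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS.
 * @ndlp: The remote port to send the LS to.
 * @pnvme_lsreq: Pointer to the LS request structure from the transport.
 * @gen_req_cmp: Completion callback for the GEN_REQUEST WQE.
 *
 * Validates the ndlp, builds the BPL describing the request and response
 * buffers, and issues a GEN_REQUEST WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in the form of -Exxx
 */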
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   struct nvmefc_ls_req *pnvme_lsreq,
		   void (*gen_req_cmp)(struct lpfc_hba *phba,
				       struct lpfc_iocbq *cmdwqe,
				       struct lpfc_wcqe_complete *wcqe))
{
	struct lpfc_dmabuf *bmp;
	struct ulp_bde64 *bpl;
	int ret;
	uint16_t ntype, nstate;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
				 "LS Req\n",
				 ndlp);
		return -ENODEV;
	}

	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6088 NVMEx LS REQ: Fail DID x%06x not "
				 "ready for IO. Type x%x, State x%x\n",
				 ndlp->nlp_DID, ntype, nstate);
		return -ENODEV;
	}

	if (!vport->phba->sli4_hba.nvmels_wq)
		return -ENOMEM;

	/*
	 * there are two dma buf in the request, actually there is one and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * struct since the nvme LS request buffers are managed by the
	 * transport.
	 */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6044 NVMEx LS REQ: Could not alloc LS buf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		return -ENOMEM;
	}

	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6042 NVMEx LS REQ: Could not alloc mbuf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		kfree(bmp);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
			 "rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, gen_req_cmp, ndlp, 2,
				pnvme_lsreq->timeout, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
				 "lsreq x%px Status %x DID %x\n",
				 pnvme_lsreq, ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}

	return 0;
}
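
/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that the LS is to be issued from.
 * @pnvme_rport: Transport remoteport that the LS is to be sent to.
 * @pnvme_lsreq: The transport nvmefc_ls_req structure for the LS.
 *
 * Driver registers this routine to handle any link service request
 * from the nvme-fc transport to a remote NVME-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in the form of -Exxx
 */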
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;
	if (vport->load_flag & FC_UNLOADING)
		return -ENODEV;

	atomic_inc(&lport->fc4NvmeLsRequests);

	ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
				 lpfc_nvme_ls_req_cmp);
	if (ret)
		atomic_inc(&lport->xmt_ls_err);

	return ret;
}
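
/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
 *        NVME LS request
 * @vport: The local port that issued the LS.
 * @ndlp: The remote port the LS was sent to.
 * @pnvme_lsreq: Pointer to the LS request structure from the transport.
 *
 * The driver validates the ndlp, looks for the LS on the NVME LS ring's
 * txcmplq, and aborts it if found.
 *
 * Returns:
 * 0 : if the LS was found and aborted
 * non-zero: various error conditions, in the form of -Exxx
 */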
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;
	bool foundit = false;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
				 "x%06x, Failing LS Req\n",
				 ndlp, ndlp ? ndlp->nlp_DID : 0);
		return -EINVAL;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
			 "x%p rqstlen:%d rsplen:%d %pad %pad\n",
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the NVME LS ring txcmplq and look for the wqe that matches
	 * this LS. If found, issue an abort on the wqe.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		if (wqe->context2 == pnvme_lsreq) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			foundit = true;
			break;
		}
	}
	spin_unlock(&pring->ring_lock);

	if (foundit)
		lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
	spin_unlock_irq(&phba->hbalock);

	if (foundit)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6213 NVMEx LS REQ Abort: Unable to locate req x%p\n",
			 pnvme_lsreq);
	return -EINVAL;
}
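
/**
 * lpfc_nvme_xmt_ls_rsp - Transmit a response for a received NVME LS request
 * @localport: Transport localport.
 * @remoteport: Transport remoteport.
 * @ls_rsp: The transport LS response to transmit.
 *
 * Returns 0 on success or a negative errno if the response could not be
 * queued.
 */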
static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
		     struct nvme_fc_remote_port *remoteport,
		     struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvme_lport *lport;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lport = (struct lpfc_nvme_lport *)localport->private;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);

	if (rc) {
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the exchange
		 * if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&lport->xmt_ls_abort);
		return rc;
	}

	return 0;
}
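
/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that the LS was issued from.
 * @pnvme_rport: Transport remoteport that the LS was sent to.
 * @pnvme_lsreq: The transport nvmefc_ls_req structure for the LS to abort.
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 */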
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

	ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lport->xmt_ls_abort);
}
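
/*
 * lpfc_nvme_adj_fcp_sgls - adjust the prebuilt cmd/rsp SGEs and WQE BDE of
 * an IO buffer to reference the NVME CMD and RSP IUs, embedding the CMD IU
 * directly in the WQE when cfg_nvme_embed_cmd is set.
 */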
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */
	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  Use the command and response dma addresses
	 * supplied by the transport rather than virtual memory to
	 * ease the restore operation.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;	/* Word 16 */

		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-29 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];		/* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
		dptr++;				/* Skip Word 0 in payload */

		*wptr++ = *dptr++;	/* Word 1 */
		*wptr++ = *dptr++;	/* Word 2 */
		*wptr++ = *dptr++;	/* Word 3 */
		*wptr++ = *dptr++;	/* Word 4 */
		dptr++;			/* Skip Word 5 in payload */
		*wptr++ = *dptr++;	/* Word 6 */
		*wptr++ = *dptr++;	/* Word 7 */
		dptr += 8;		/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;	/* Word 16 */
		*wptr++ = *dptr++;	/* Word 17 */
		*wptr++ = *dptr++;	/* Word 18 */
		*wptr++ = *dptr++;	/* Word 19 */
		*wptr++ = *dptr++;	/* Word 20 */
		*wptr++ = *dptr++;	/* Word 21 */
		*wptr++ = *dptr++;	/* Word 22 */
		*wptr = *dptr;		/* Word 23 */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}
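
/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to HBA context object.
 * @pwqeIn: Pointer to the command WQE that completed.
 * @wcqe: Pointer to the completion WCQE.
 *
 * Completion handler for NVME FCP WQEs: updates statistics, rebuilds a
 * synthetic ERSP when the hardware reports CQE_CODE_NVME_ERSP, maps the
 * SLI status to a transport status, and calls the transport done()
 * callback (deferred for exchange-busy IOs).
 */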
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_io_buf *lpfc_ncmd =
		(struct lpfc_io_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int cpu;
#endif

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6071 Null lpfc_ncmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_ncmd->buf_lock);

	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
				 "nvmeCmd x%px\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

	if (unlikely(status && vport->localport)) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6062 Ignoring NVME cmpl. No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;

			/* Sanity check */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_NVME_IOERR,
						 "6032 Delay Aborted cmd x%px "
						 "nvme cmd x%px, xri x%x, "
						 "xb %d\n",
						 lpfc_ncmd, nCmd,
						 lpfc_ncmd->cur_iocbq.sli4_xritag,
						 bf_get(lpfc_wcqe_c_xb, wcqe));
			fallthrough;
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x [x%x] "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->parameter,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_ncmd);
	}
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (lpfc_ncmd->cpu != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 cpu, lpfc_ncmd->cpu);
	}
#endif

	/* An aborted IO keeps its nvmeCmd binding; the transport done()
	 * call is deferred until the abort exchange completes.  Otherwise
	 * break the binding and complete the IO to the transport now.
	 */
	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
		spin_unlock(&lpfc_ncmd->buf_lock);
		nCmd->done(nCmd);
	} else
		spin_unlock(&lpfc_ncmd->buf_lock);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
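
/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure.
 * @lpfc_ncmd: Pointer to the lpfc IO buffer for the command.
 * @pnode: pointer to a node-list data structure.
 * @cstat: pointer to the control status structure for this hdwq.
 *
 * Builds the FCP WQE from the iread/iwrite/icmnd templates based on the
 * IO direction, applies first-burst length when negotiated, and fills
 * in the common WQE words.
 *
 * Return value :
 *   0 - Success
 */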
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}

	if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);

	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 10 */
	bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

	/* Words 13 14 15 are for PBDE support */

	pwqeq->vport = vport;
	return 0;
}
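
/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure.
 * @lpfc_ncmd: Pointer to the lpfc IO buffer for the command.
 *
 * Walks the transport-provided scatter list and builds the SGL for the
 * IO, chaining extra SGL pages (LSP SGEs) when the list exceeds one
 * page, and optionally sets up the PBDE for the first data segment.
 *
 * Return value :
 *   0 - Success
 *   1 - Failure (bad scatter list)
 */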
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr = 0;
	uint32_t num_bde = 0;
	uint32_t dma_len = 0;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	bool lsp_just_set = false;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6058 Too many sg segments from "
					"NVME Transport. Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command.  Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;

		/* for tracking segment boundaries */
		j = 2;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}

			sgl->word2 = 0;
			if ((num_bde + 1) == nseg) {
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_ncmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_ncmd->seg_cnt = 0;
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
			      LPFC_SGE_TYPE_LSP)) {
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = data_sg->dma_address;
				dma_len = data_sg->length;
				sgl->addr_lo = cpu_to_le32(
							 putPaddrLow(physaddr));
				sgl->addr_hi = cpu_to_le32(
							putPaddrHigh(physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				data_sg = sg_next(data_sg);

				sgl++;

				lsp_just_set = false;
			} else {
				sgl->word2 = cpu_to_le32(sgl->word2);

				sgl->sge_len = cpu_to_le32(
						     phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}
		if (phba->cfg_enable_pbde) {
			/* Use PBDE support for first SGL only, offset == 0 */
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

			/* Word 11 */
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
		}

	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}
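
/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's localport data.
 * @pnvme_rport: Pointer to the rport receiving the IO.
 * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue.
 * @pnvme_fcreq: IO request from the nvme-fc transport.
 *
 * Driver registers this routine as its IO request handler.  It builds
 * and issues an FCP WQE with data from @pnvme_fcreq to the rport
 * indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   negative errno on failure
 */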
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx, cpu;
	struct lpfc_nvme_lport *lport;
	struct lpfc_fc4_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0;
#endif

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if (unlikely(vport->load_flag & FC_UNLOADING)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Busy IO, ndlp not ready: rport x%px "
				 "ndlp x%px, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->fc4_xpt_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;

	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail;
		}
	}

	/* Lookup the hardware queue index from the transport's queue handle
	 * or from the running CPU, depending on the configured scheduler.
	 */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = raw_smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif

	/*
	 * Store the data needed by the driver to issue, abort, and
	 * complete an IO.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->qidx = lpfc_queue_info->qidx;

	/*
	 * Issue the IO on the hdwq chosen above so the completion
	 * processing and resource accounting use the same WQ/CQ/EQ set.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6113 Fail IO, Could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		lpfc_ncmd->cpu = cpu;
		if (idx != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6702 CPU Check cmd: "
					 "cpu %d wq %d\n",
					 lpfc_ncmd->cpu,
					 lpfc_queue_info->index);
	}
#endif
	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			cstat->output_requests--;
		else
			cstat->input_requests--;
	} else
		cstat->control_requests--;
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail:
	return ret;
}
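
/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the abort command iocb object.
 * @abts_cmpl: Pointer to the abort WCQE complete object.
 *
 * This is the callback function for an NVME FCP IO abort WQE; it logs
 * the completion and releases the abort iocb.
 */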
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
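
/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FC ABTS
 * @pnvme_lport: Pointer to the driver's localport data.
 * @pnvme_rport: Pointer to the rport that received the IO.
 * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue.
 * @pnvme_fcreq: The IO request from the nvme-fc transport to abort.
 *
 * Driver registers this routine as its NVME request abort handler.
 * After validating that the driver's state still matches the transport's
 * request, it issues an abort WQE for the outstanding exchange and
 * returns; the abort completes asynchronously.
 */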
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_io_buf *lpfc_nbuf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	unsigned long flags;
	int ret_val;

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
				 "6129 Fail Abort, HW Queue Handle NULL.\n");
		return;
	}

	phba = vport->phba;
	freqpriv = pnvme_fcreq->private;

	if (unlikely(!freqpriv))
		return;
	if (vport->load_flag & FC_UNLOADING)
		return;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req x%px\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now. hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer. Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer. Skipping abort req.\n");
		return;
	}
	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/* Guard against IO completion being called at same time */
	spin_lock(&lpfc_nbuf->buf_lock);

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf x%px nvmeCmd x%px, "
				 "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6142 NVME IO req x%px not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	atomic_inc(&lport->xmt_fcp_abort);
	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq x%px, "
				 "lpfc_ncmd %px xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
					      lpfc_nvme_abort_fcreq_cmpl);

	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	if (ret_val != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq x%px.\n",
				 ret_val, pnvme_fcreq);
		return;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x\n",
			 nvmereq_wqe->sli4_xritag);
	return;

out_unlock:
	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return;
}
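
/* Declare and initialize an instance of the FC NVME template. */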
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req = lpfc_nvme_ls_req,
	.fcp_io = lpfc_nvme_fcp_io_submit,
	.ls_abort = lpfc_nvme_ls_abort,
	.fcp_abort = lpfc_nvme_fcp_abort,
	.xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp,

	/* max_hw_queues and max_sgl_segments are refined at
	 * localport registration time from the hba config.
	 */
	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures.
	 * No use for the last two sizes at this time.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
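
/**
 * lpfc_get_nvme_buf - Get an nvme IO buffer from the hdwq's io_buf_list
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to the lpfc nodelist data structure.
 * @idx: index into the hardware queue.
 * @expedite: flag indicating a low-latency hint.
 *
 * Removes an IO buffer from the head of the hardware queue's
 * io_buf_list and initializes it for NVME use.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 */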
static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;

	lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);

	if (lpfc_ncmd) {
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = &pwqeq->wqe;

		/* Setup key fields in buffer that may have been changed
		 * if other protocols used this buffer.
		 */
		pwqeq->iocb_flag = LPFC_IO_NVME;
		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
		lpfc_ncmd->start_time = jiffies;
		lpfc_ncmd->flags = 0;

		/* Mark the first SGE as a skip entry; the cmd and rsp
		 * SGEs are rebuilt in lpfc_nvme_adj_fcp_sgls() when the
		 * IO is prepped.
		 */
		sgl = lpfc_ncmd->dma_sgl;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
			atomic_inc(&ndlp->cmd_pending);
			lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
		}

	} else {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
	}

	return lpfc_ncmd;
}
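
/**
 * lpfc_release_nvme_buf - Return an nvme buffer back to the hba's buf list
 * @phba: The HBA for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * Releases @lpfc_ncmd back to the hardware queue's io_buf_list.  On SLI4,
 * XRIs are tied to the buffer; if the exchange is still busy (aborted IO),
 * the buffer is parked on the abort list until the exchange is released.
 */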
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;

	qp = lpfc_ncmd->hdwq;
	if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
				"ox_id x%x on reqtag x%x\n",
				lpfc_ncmd->cur_iocbq.sli4_xritag,
				lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_abts_io_buf_list);
		qp->abts_nvme_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
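
/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance
 * @vport: the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance and bind
 * it to the nvme-fc transport.  It is called once during driver load,
 * after the other services are initialized, and requires a vport, vpi,
 * and wwns at call time.  Other localport parameters are modified as
 * the driver's FCID and the Fabric WWN are established.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - no heap memory available
 *      other values - from the nvme registration upcall
 */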
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	/* Initialize this localport instance.  The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;

	/* Advertise how many hw queues we support based on cfg_hdw_queue,
	 * which will not exceed cpu count.
	 */
	lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	/* localport is allocated from the stack, but the registration
	 * call allocates heap memory as well as the private area.
	 */
	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev, &localport);
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP x%px, private "
				 "x%px, sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* Private is our lport size declared in the template. */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		vport->nvmei_support = 1;

		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);

		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);
	}

	return ret;
}

#if (IS_ENABLED(CONFIG_NVME_FC))
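/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * The driver has to wait for the host nvme transport to callback
 * indicating the localport has successfully unregistered all
 * resources.  Since this is an uninterruptible wait, loop every ten
 * seconds and print a message indicating no progress.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch.
 */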
static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport,
			   struct completion *lport_unreg_cmp)
{
	u32 wait_tmo;
	int ret, i, pending = 0;
	struct lpfc_sli_ring *pring;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli4_hdw_queue *qp;
	int abts_scsi, abts_nvme;

	/* Host transport has to clean up and confirm requiring an indefinite
	 * wait. Print a message if a 10 second wait expires and renew the
	 * wait. This is unexpected.
	 */
	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
	while (true) {
		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
		if (unlikely(!ret)) {
			pending = 0;
			abts_scsi = 0;
			abts_nvme = 0;
			for (i = 0; i < phba->cfg_hdw_queue; i++) {
				qp = &phba->sli4_hba.hdwq[i];
				pring = qp->io_wq->pring;
				if (!pring)
					continue;
				pending += pring->txcmplq_cnt;
				abts_scsi += qp->abts_scsi_io_bufs;
				abts_nvme += qp->abts_nvme_io_bufs;
			}
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6176 Lport x%px Localport x%px wait "
					 "timed out. Pending %d [%d:%d]. "
					 "Renewing.\n",
					 lport, vport->localport, pending,
					 abts_scsi, abts_nvme);
			continue;
		}
		break;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
			 "6177 Lport x%px Localport x%px Complete Success\n",
			 lport, vport->localport);
}
#endif
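
/**
 * lpfc_nvme_destroy_localport - Destroy the lpfc localport binding
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine is invoked to destroy the localport bound to the vport.
 * The lport memory was allocated by the nvme-fc transport and is
 * released there.  This routine ensures all rports for the lport have
 * been disconnected and the lport has been completely unregistered.
 */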
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	int ret;
	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	lport = (struct lpfc_nvme_lport *)localport->private;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport x%px\n",
			 localport);

	/* The unregister is asynchronous; localport_delete() fires
	 * lport_unreg_cmp once the transport has torn everything down.
	 */
	lport->lport_unreg_cmp = &lport_unreg_cmp;
	ret = nvme_fc_unregister_localport(localport);

	/* Wait for completion.  This either blocks
	 * indefinitely or succeeds
	 */
	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
	vport->localport = NULL;

	/* Regardless of the unregister upcall response code, the vport
	 * no longer presents NVME initiator support.
	 */
	vport->nvmei_support = 0;
	if (ret == 0) {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	} else {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
	}
#endif
}
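
/**
 * lpfc_nvme_update_localport - Update attributes of an nvme localport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Refreshes the localport's port_id and role after the vport's FC ID
 * changes (fabric login/logout).
 */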
void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP x%px, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport x%px did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport x%px to DID x%06x\n",
			 lport, localport->port_id);
#endif
}
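
/**
 * lpfc_nvme_register_port - Register a remote node with the nvme transport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to the node to register as an NVME remoteport.
 *
 * Registers @ndlp as an nvme-fc remoteport, handling rebind of an
 * existing rport instance if one is present.
 *
 * Returns 0 on success, or the transport's registration error code.
 */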
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret = 0;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_rport *oldrport;
	struct nvme_fc_remote_port *remote_port;
	struct nvme_fc_port_info rpinfo;
	struct lpfc_nodelist *prev_ndlp = NULL;
	struct fc_rport *srport = ndlp->rport;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;
	if (!localport)
		return 0;

	lport = (struct lpfc_nvme_lport *)localport->private;

	/* NVME rports are not preserved across devloss.
	 * Just register this instance.  The port role capabilities
	 * communicated here are consistent with the PRLI response data.
	 */
	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
	rpinfo.port_id = ndlp->nlp_DID;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	if (srport)
		rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
	else
		rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;

	spin_lock_irq(&ndlp->lock);
	oldrport = lpfc_ndlp_get_nrport(ndlp);
	if (oldrport) {
		prev_ndlp = oldrport->ndlp;
		spin_unlock_irq(&ndlp->lock);
	} else {
		spin_unlock_irq(&ndlp->lock);
		if (!lpfc_nlp_get(ndlp)) {
			dev_warn(&vport->phba->pcidev->dev,
				 "Warning - No node ref - exit register\n");
			return 0;
		}
	}

	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
	if (!ret) {
		/* If the ndlp already has an nrport, this is just
		 * a resume of the existing rport.  Else this is a
		 * new rport.
		 */
		/* Guard against an unregistering/reregistering
		 * race that leaves the WAIT flag set.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
		ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
		spin_unlock_irq(&ndlp->lock);
		rport = remote_port->private;
		if (oldrport) {
			/* Sever the ndlp<->rport association
			 * before dropping the ndlp ref from
			 * register.
			 */
			spin_lock_irq(&ndlp->lock);
			ndlp->nrport = NULL;
			ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
			spin_unlock_irq(&ndlp->lock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;

			/* Reference only removed if previous NDLP is no longer
			 * active. It might be just a swap and removing the
			 * reference would cause a premature cleanup.
			 */
			if (prev_ndlp && prev_ndlp != ndlp) {
				if (!prev_ndlp->nrport)
					lpfc_nlp_put(prev_ndlp);
			}
		}

		/* Clean bind the rport to the ndlp. */
		rport->remoteport = remote_port;
		rport->lport = lport;
		rport->ndlp = ndlp;
		spin_lock_irq(&ndlp->lock);
		ndlp->nrport = rport;
		spin_unlock_irq(&ndlp->lock);
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_NVME_DISC | LOG_NODE,
				 "6022 Bind lport x%px to remoteport x%px "
				 "rport x%px WWNN 0x%llx, "
				 "Rport WWPN 0x%llx DID "
				 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
				 lport, remote_port, rport,
				 rpinfo.node_name, rpinfo.port_name,
				 rpinfo.port_id, rpinfo.port_role,
				 ndlp, prev_ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "6031 RemotePort Registration failed "
				 "err: %d, DID x%06x\n",
				 ret, ndlp->nlp_DID);
	}

	return ret;
#else
	return 0;
#endif
}
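
/*
 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
 *
 * If the ndlp represents an NVME Target that we are logged into, ping
 * the NVME FC Transport layer to initiate a device rescan on this
 * remote NPort.
 */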
void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_nvme_rport *nrport;
	struct nvme_fc_remote_port *remoteport = NULL;

	spin_lock_irq(&ndlp->lock);
	nrport = lpfc_ndlp_get_nrport(ndlp);
	if (nrport)
		remoteport = nrport->remoteport;
	spin_unlock_irq(&ndlp->lock);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6170 Rescan NPort DID x%06x type x%x "
			 "state x%x nrport x%px remoteport x%px\n",
			 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
			 nrport, remoteport);

	if (!nrport || !remoteport)
		goto rescan_exit;

	/* Only rescan if we are an NVME target in the MAPPED state */
	if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
	    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		nvme_fc_rescan_remoteport(remoteport);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6172 NVME rescanned DID x%06x "
				 "port_state x%x\n",
				 ndlp->nlp_DID, remoteport->port_state);
	}
	return;
 rescan_exit:
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6169 Skip NVME Rport Rescan, NVME remoteport "
			 "unregistered\n");
#endif
}
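
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of devloss or rport recovery from the current
 * nvme transport's perspective.  Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.  The driver
 * unbinds the DID and port_role so no further IO can be issued on this
 * remoteport.
 */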
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport = NULL;

	localport = vport->localport;

	/* This is fundamental error.  The localport is always
	 * available until driver unload.  Just exit.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	spin_lock_irq(&ndlp->lock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&ndlp->lock);
	if (!remoteport)
		goto input_err;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport x%px, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x "
			 "refcnt %d\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type, kref_read(&ndlp->kref));

	/* Sanity check ndlp type.  Only call for NVME ports. Don't
	 * clear any rport state until the transport calls back.
	 */
	if (ndlp->nlp_type & NLP_NVME_TARGET) {
		/* No concern about the role change on the nvme remoteport.
		 * The transport will update it.
		 */
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG;
		spin_unlock_irq(&vport->phba->hbalock);

		/* Don't let the host nvme transport keep sending keep-alives
		 * on this remoteport. Vport is unloading, no recovery. The
		 * return values is ignored.  The upcall is a courtesy to the
		 * transport.
		 */
		if (vport->load_flag & FC_UNLOADING)
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);

		/* The driver no longer knows if the nrport memory is valid
		 * because the controller teardown process has begun and
		 * is asynchronous.  Break the binding in the ndlp. Also
		 * remove the register ndlp reference to setup node release.
		 */
		ndlp->nrport = NULL;
		lpfc_nlp_put(ndlp);
		if (ret != 0) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);
		}
	}
	return;

 input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "6168 State error: lport x%px, rport x%px FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}
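
/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of an NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvme xri abort wcqe structure.
 * @lpfc_ncmd: The nvme job structure for the request being aborted.
 *
 * This routine is invoked by the worker thread to process a SLI4
 * fast-path NVME aborted xri.  Aborted NVME IO commands are completed
 * to the transport here.
 */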
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri,
			   struct lpfc_io_buf *lpfc_ncmd)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct nvmefc_fcp_req *nvme_cmd = NULL;
	struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;

	if (ndlp)
		lpfc_sli4_abts_err_handler(phba, ndlp, axri);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6311 nvme_cmd %p xri x%x tag x%x abort complete and "
			"xri released\n",
			lpfc_ncmd->nvmeCmd, xri,
			lpfc_ncmd->cur_iocbq.iotag);

	/* Aborted NVME commands are required to not complete
	 * before the abort exchange command fully completes.
	 * Once completed, it is available via the put list.
	 */
	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
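
/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * Waits for all outstanding WQEs on the IO rings to be removed from
 * the txcmplqs.  No abort wqes are issued for the IO commands; they
 * complete on their own (e.g. with IOERR_SLI_DOWN during teardown).
 */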
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	u32 i, wait_cnt = 0;

	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
		return;

	/* Cycle through all IO rings and make sure all outstanding
	 * WQEs have been removed from the txcmplqs.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		if (!phba->sli4_hba.hdwq[i].io_wq)
			continue;
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;

		if (!pring)
			continue;

		/* Retrieve everything on the txcmplq */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_cnt++;

			/* Log progress periodically so a stalled
			 * drain is visible in the system log.
			 */
			if ((wait_cnt % 1000) == 0) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6178 NVME IO not empty, "
						"cnt %d\n", wait_cnt);
			}
		}
	}

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

}
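
/**
 * lpfc_nvme_cancel_iocb - cancel an NVME WQE without a hardware completion
 * @phba: Pointer to HBA context object.
 * @pwqeIn: Pointer to the command WQE being cancelled.
 * @stat: completion status to report to the transport.
 * @param: completion parameter to report to the transport.
 *
 * Builds a synthetic WCQE with the given status and invokes the normal
 * completion handler so the IO is returned to the transport.
 */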
void
lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      uint32_t stat, uint32_t param)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_io_buf *lpfc_ncmd;
	struct nvmefc_fcp_req *nCmd;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep = &wcqe;

	lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
	if (!lpfc_ncmd) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}
	/* For abort iocb just return, IO iocb will do a done call */
	if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
	    CMD_ABORT_XRI_CX) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}

	spin_lock(&lpfc_ncmd->buf_lock);
	nCmd = lpfc_ncmd->nvmeCmd;
	if (!nCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
			"6194 NVME Cancel xri %x\n",
			lpfc_ncmd->cur_iocbq.sli4_xritag);

	wcqep->word0 = 0;
	bf_set(lpfc_wcqe_c_status, wcqep, stat);
	wcqep->parameter = param;
	wcqep->word3 = 0; /* xb is 0 */

	/* Call release with XB=1 to queue the IO into the abort list. */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		bf_set(lpfc_wcqe_c_xb, wcqep, 1);

	(pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep);
#endif
}