/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_async_xchg_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_async_xchg_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_async_xchg_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_async_xchg_ctx *,
					    uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_async_xchg_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* template for FCP TSEND WQE */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */

	/* template for FCP TRECEIVE WQE */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */

	/* template for FCP TRSP WQE */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 - irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
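/* Walk the target-port active exchange list and return the context
 * whose XRI matches the given exchange id, or NULL if none matches.
 */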
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}

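/* Walk the target-port active exchange list and return the context
 * matching the given OX_ID and initiator S_ID, or NULL if none matches.
 */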
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}
#endif

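/* Move an exchange context from the target-port active list to the
 * ABTS list so its final release is deferred until abort and/or
 * XRI_ABORTED CQE processing completes.  Caller holds ctxp->ctxlock.
 */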
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba,
			 struct lpfc_async_xchg_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVME_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVME_CTX_RLS;
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
 *         transmission of an NVME LS response.
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. The function frees memory resources used for the command
 * used to send the NVME LS RSP.
 **/
void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			   struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
	struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	result = wcqe->parameter;

	if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6410 NVMEx LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
	}

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
			 axchg->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
			status, result, axchg->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	ls_rsp->done(ls_rsp);
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
			status, axchg->oxid);
	kfree(axchg);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function updates any states and statistics, then calls the
 * generic completion handler to free resources.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	if (!phba->targetport)
		goto finish;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

finish:
	__lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctx_buf: ctx buffer context
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVME_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVME_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned */
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_async_xchg_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;

	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to RSP is put on WQ.
	 * Segment 8 - Time from Driver WQ put is done to MSI-X
	 * ISR for RSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for RSP cmpl to
	 * RSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to RSP cmpl is passed to NVME Layer.
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;
	if (!seg6)
		goto out;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;
out:
	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function updates any states and statistics, then calls the
 * transport's done routine to finish the IO.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_async_xchg_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVME_IO_INP;

	rsp = &ctxp->hdlrctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVME_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

		} else {
			ctxp->flag &= ~LPFC_NVME_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
				"XBUSY:x%x\n",
				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
				status, result, ctxp->flag);

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sent an FCP response */
		ctxp->state = LPFC_NVME_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (ctxp->cpu != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6704 CPU Check cmdcmpl: "
					"cpu %d expect %d\n",
					id, ctxp->cpu);
	}
#endif
}

/**
 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit an NVME
 *         LS rsp for a prior NVME LS request that was received.
 * @axchg: pointer to exchange context for the NVME LS request the response
 *         is for.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
 *
 * This routine is used to format and send a WQE to transmit a NVME LS
 * Response. The response is for a prior NVME LS request that was
 * received and posted to the transport.
 *
 * Returns:
 *  0 : if response successfully transmit
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
int
__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
		       struct nvmefc_ls_rsp *ls_rsp,
		       void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
					      struct lpfc_iocbq *cmdwqe,
					      struct lpfc_wcqe_complete *wcqe))
{
	struct lpfc_hba *phba = axchg->phba;
	struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);

	if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6412 NVMEx LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
		return -EALREADY;
	}
	axchg->state = LPFC_NVME_STE_LS_RSP;
	axchg->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
					  ls_rsp->rsplen);
	if (nvmewqeq == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6150 NVMEx LS Drop Rsp x%x: Prep\n",
				axchg->oxid);
		rc = -ENOMEM;
		goto out_free_buf;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = ls_rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);
	/*
	 * Note: although we're using stack space for the dmabuf, the
	 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
	 * be referenced after it returns back to this routine.
	 */

	nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = axchg;

	lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
			 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);

	/* clear to be sure there's no reference */
	nvmewqeq->context3 = NULL;

	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6151 NVMEx LS RSP x%x: failed to transmit %d\n",
			axchg->oxid, rc);

	rc = -ENXIO;

	lpfc_nlp_put(nvmewqeq->context1);

out_free_buf:
	/* Give back resources */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	/*
	 * As transport doesn't track completions of responses, if the rsp
	 * fails to send, the transport will effectively ignore the rsp
	 * and consider the LS done. However, the driver has an active
	 * exchange open for the LS - so be sure to abort the exchange
	 * if the response isn't sent.
	 */
	lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
	return rc;
}

/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
 * @tgtport: pointer to target port that NVME LS is to be transmit from.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 *
 * Driver registers this routine to transmit responses for received NVME
 * LS requests.
 *
 * This routine is used to format and send a WQE to transmit a NVME LS
 * Response. The ls_rsp is used to reverse-map the LS to the original
 * NVME LS request sequence, which provides addressing information for
 * the remote port the LS is to be sent to, as well as the exchange id
 * that the LS is bound to.
 *
 * Returns:
 *  0 : if response successfully transmit
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);

	if (rc) {
		atomic_inc(&nvmep->xmt_ls_drop);
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&nvmep->xmt_ls_abort);
		return rc;
	}

	atomic_inc(&nvmep->xmt_ls_rsp);
	return 0;
}

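/* Transport entry point for an NVME FCP op (TSEND/TRECEIVE/TRSP).
 * Builds the WQE for the requested operation and posts it to the
 * hardware queue; if the WQ is full the WQE is parked on the
 * wqfull_list to be re-issued when WQE slots free up.
 */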
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		if (rsp->hwqid != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVME_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6102 IO oxid x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVME_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
		wq = ctxp->hdwq->io_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}

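/* Transport callback invoked when the last reference on the
 * targetport is dropped after nvmet_fc_unregister_targetport().
 */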
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}

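/* Transport entry point to abort an outstanding NVME FCP exchange.
 * Issues an unsolicited or solicited ABTS depending on whether any
 * WQEs have been issued on the exchange yet.
 */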
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVME_ABORT_OP;

	if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->io_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* A state of LPFC_NVME_STE_RCV means we have just received
	 * the NVME command and have not started processing it.
	 * (by issuing any IO WQEs on this exchange yet)
	 */
	if (ctxp->state == LPFC_NVME_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}

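/* Transport entry point to return ownership of an IO context.  The
 * context is reposted to the free list unless an abort or exchange-busy
 * condition defers the release.
 */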
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVME_STE_DONE &&
		 ctxp->state != LPFC_NVME_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVME_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
			 ctxp->oxid, ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
	ctxp->flag &= ~LPFC_NVME_TNOTIFY;

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

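/* Transport entry point invoked when a deferred command has finally
 * been accepted; the held RQ buffer can now be freed since a
 * replacement was already reposted.
 */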
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, raw_smp_processor_id());

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer oxid x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}

/**
 * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
 * @phba: Pointer to HBA context object
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the completion handler for NVME LS requests.
 * The function updates any states and statistics, then calls the
 * generic completion handler to finish completion of the request.
 **/
static void
lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		      struct lpfc_wcqe_complete *wcqe)
{
	__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ls_req - Issue an Link Service request
 * @targetport: pointer to target instance registered with nvmet transport.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *              The driver sets this value as the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
		  void *hosthandle,
		  struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;
	u32 hstate;

	if (!lpfc_nvmet)
		return -EINVAL;

	phba = lpfc_nvmet->phba;
	if (phba->pport->load_flag & FC_UNLOADING)
		return -EINVAL;

	hstate = atomic_read(&lpfc_nvmet->state);
	if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
		return -EACCES;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
				 lpfc_nvmet_ls_req_cmp);

	return ret;
}

/**
 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
 * @targetport: Transport targetport, that LS was issued from.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *              The driver sets this value as the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transports perspective).
 **/
static void
lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
		    void *hosthandle,
		    struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;

	phba = lpfc_nvmet->phba;
	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lpfc_nvmet->xmt_ls_abort);
}

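/* Transport callback invoked when the transport is done with the
 * hosthandle (the driver's ndlp pointer); drop the node reference
 * taken when the handle was handed to the transport.
 */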
static void
lpfc_nvmet_host_release(void *hosthandle)
{
	struct lpfc_nodelist *ndlp = hosthandle;
	struct lpfc_hba *phba = ndlp->phba;
	struct lpfc_nvmet_tgtport *tgtp;

	if (!phba->targetport || !phba->targetport->private)
		return;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6202 NVMET XPT releasing hosthandle x%px "
			"DID x%x xflags x%x refcnt %d\n",
			hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
			kref_read(&ndlp->kref));
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irq(&ndlp->lock);
	ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
	spin_unlock_irq(&ndlp->lock);
	lpfc_nlp_put(ndlp);
	atomic_set(&tgtp->state, 0);
}

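/* Transport callback invoked on an NVME subsystem change; notify
 * initiators of the change by issuing an RSCN for the local port.
 */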
static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_hba *phba;
	uint32_t rc;

	tgtp = tgtport->private;
	phba = tgtp->phba;

	rc = lpfc_issue_els_rscn(phba->pport, 0);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6420 NVMET subsystem change: Notification %s\n",
			(rc) ? "Failed" : "Sent");
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv      = lpfc_nvmet_defer_rcv,
	.discovery_event = lpfc_nvmet_discovery_event,
	.ls_req         = lpfc_nvmet_ls_req,
	.ls_abort       = lpfc_nvmet_ls_abort,
	.host_release   = lpfc_nvmet_host_release,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
	.lsrqst_priv_sz = 0,
};

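/* Release all receive contexts on one per-CPU/per-MRQ list: return each
 * XRI/SGL to the NVMET SGL list and free the iocbq and context memory.
 */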
static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

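/* Tear down every per-CPU context list for all MRQs and free the
 * context-info array itself.
 */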
static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}

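/* Allocate one receive context (ctx_buf, iocbq, XRI/SGL) per NVMET XRI
 * and distribute the contexts across the per-CPU lists of every MRQ.
 */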
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx, cpu;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
	 *
	 * Each line represents a MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ N is initially assumed to be associated with CPU N, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
	for_each_possible_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba,
					       cpumask_first(cpu_present_mask),
					       j);
		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	cpu = cpumask_first(cpu_present_mask);
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVME_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = &nvmewqe->wqe;

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, cpu, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq) {
			idx = 0;
			cpu = cpumask_first(cpu_present_mask);
			continue;
		}
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu == nr_cpu_ids)
			cpu = cpumask_first(cpu_present_mask);
	}

	for_each_present_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu x%px\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
		}
	}
	return 0;
}

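/* Register this port with the nvmet-fc transport and initialize the
 * driver-private targetport statistics.
 */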
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: x%px, private x%px "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->defer_ctx, 0);
		atomic_set(&tgtp->defer_fod, 0);
		atomic_set(&tgtp->defer_wqfull, 0);
	}
	return error;
}

int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port x%px did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *req = NULL;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_lock(&ctxp->ctxlock);
		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVME_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
			list_del_init(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVME_XBUSY;
		spin_unlock(&ctxp->ctxlock);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
					    ctxp->ctxbuf->sglq->sli4_lxritag,
					    rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid x%x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
	if (ctxp) {
		/*
		 * Abort already done by FW, so BA_ACC sent.
		 * However, the transport may be unaware.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
				"flag x%x oxid x%x rxid x%x\n",
				xri, ctxp->state, ctxp->flag, ctxp->oxid,
				rxid);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_ABTS_RCV;
		ctxp->state = LPFC_NVME_STE_ABORT;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		req = &ctxp->hdlrctx.fcp_req;
		if (req)
			nvmet_fc_rcv_fcp_abort(phba->targetport, req);
	}
#endif
}

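/* Handle an unsolicited ABTS received for an NVMET exchange.  The
 * exchange is looked up on the ABTS list, the RQ wait list, and the
 * active list in turn; a BA_ACC or BA_RJT response is sent accordingly.
 */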
int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint32_t sid;
	uint16_t oxid, xri;
	unsigned long iflag = 0;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->hdlrctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* check the wait list */
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		struct rqb_dmabuf *nvmebuf;
		struct fc_frame_header *fc_hdr_tmp;
		u32 sid_tmp;
		u16 oxid_tmp;
		bool found = false;

		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

		/* match by oxid and s_id */
		list_for_each_entry(nvmebuf,
				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
				    hbuf.list) {
			fc_hdr_tmp = (struct fc_frame_header *)
					(nvmebuf->hbuf.virt);
			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
			if (oxid_tmp != oxid || sid_tmp != sid)
				continue;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6321 NVMET Rcv ABTS oxid x%x from x%x "
					"is waiting for a ctxp\n",
					oxid, sid);

			list_del_init(&nvmebuf->hbuf.list);
			phba->sli4_hba.nvmet_io_wait_cnt--;
			found = true;
			break;
		}
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* free buffer since already posted a new DMA buffer to RQ */
		if (found) {
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
			/* Respond with BA_ACC accordingly */
			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
			return 0;
		}
	}

	/* check active list */
	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
	if (ctxp) {
		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
				"flag x%x state x%x\n",
				ctxp->oxid, xri, ctxp->flag, ctxp->state);

		if (ctxp->flag & LPFC_NVME_TNOTIFY) {
			/* Notify the transport */
			nvmet_fc_rcv_fcp_abort(phba->targetport,
					       &ctxp->hdlrctx.fcp_req);
		} else {
			cancel_work_sync(&ctxp->ctxbuf->defer_work);
			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
			 oxid, raw_smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}

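/* Flush WQEs parked on a work queue's wqfull_list, completing either
 * one specific IO (ctxp != NULL) or all of them with a faked
 * ABORT_REQUESTED status.
 */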
static void
lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
			struct lpfc_async_xchg_ctx *ctxp)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_iocbq *next_nvmewqeq;
	unsigned long iflags;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep;

	pring = wq->pring;
	wcqep = &wcqe;

	/* Fake an ABORT error code back to cmpl routine */
	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
	wcqep->parameter = IOERR_ABORT_REQUESTED;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
				 &wq->wqfull_list, list) {
		if (ctxp) {
			/* Checking for a specific IO to flush */
			if (nvmewqeq->context2 == ctxp) {
				list_del(&nvmewqeq->list);
				spin_unlock_irqrestore(&pring->ring_lock,
						       iflags);
				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
							  wcqep);
				return;
			}
			continue;
		} else {
			/* Flush all IOs */
			list_del(&nvmewqeq->list);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
			spin_lock_irqsave(&pring->ring_lock, iflags);
		}
	}
	if (!ctxp)
		wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
}

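/* Called when WQE slots become available: re-issue WQEs parked on the
 * wqfull_list until the list drains or the WQ fills again.
 */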
void
lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
			  struct lpfc_queue *wq)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflags;
	int rc;

	/*
	 * Some WQE slots are available, so try to re-issue anything
	 * on the WQ wqfull_list.
	 */
	pring = wq->pring;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	while (!list_empty(&wq->wqfull_list)) {
		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
				 list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
		spin_lock_irqsave(&pring->ring_lock, iflags);
		if (rc == -EBUSY) {
			/* WQ was full again, so put it back on the list */
			list_add(&nvmewqeq->list, &wq->wqfull_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return;
		}
		if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
			if (ctxp->ts_cmd_nvme) {
				if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
					ctxp->ts_status_wqput = ktime_get_ns();
				else
					ctxp->ts_data_wqput = ktime_get_ns();
			}
#endif
		} else {
			WARN_ON(rc);
		}
	}
	wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

#endif
}

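/* Unregister the targetport from the nvmet-fc transport, flushing any
 * queued WQEs first and waiting (with timeout) for the unregister to
 * complete before releasing the IO contexts.
 */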
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_queue *wq;
	uint32_t qidx;
	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			wq = phba->sli4_hba.hdwq[qidx].io_wq;
			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
		}
		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
		nvmet_fc_unregister_targetport(phba->targetport);
		if (!wait_for_completion_timeout(&tport_unreg_cmp,
				msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6179 Unreg targetport x%px timeout "
					"reached.\n", phba->targetport);
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}

/**
 * lpfc_nvmet_handle_lsreq - Process an NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
 * lpfc_nvmet_xmt_ls_rsp_cmp will free the allocated axchg.
 *
 * Returns 0 if LS was handled and processed successfully.
 *
 * Returns non-zero error; otherwise the driver is required to
 * abort the LS (which will issue a RJT to the remote port).
 **/
int
lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
	uint32_t *payload = axchg->payload;
	int rc;

	atomic_inc(&tgtp->rcv_ls_req_in);

	/*
	 * Driver passes the ndlp as the hosthandle argument allowing
	 * the transport to generate LS requests for any associations
	 * that are created.
	 */
	rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
				 axchg->payload, axchg->size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", axchg->size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (!rc) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return 0;
	}

	atomic_inc(&tgtp->rcv_ls_req_drop);
#endif
	return 1;
}

static void
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_hba *phba = ctxp->phba;
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t *payload, qno;
	uint32_t rc;
	unsigned long iflags;

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6159 process_rcv_fcp_req, nvmebuf is NULL, "
				"oxid: x%x flg: x%x state: x%x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		return;
	}

	if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6324 IO oxid x%x aborted\n",
				ctxp->oxid);
		return;
	}

	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	ctxp->flag |= LPFC_NVME_TNOTIFY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_isr_cmd)
		ctxp->ts_cmd_nvme = ktime_get_ns();
#endif
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
	 * in the NVME command / FC header is stored.
	 * A buffer has already been reposted for this IO, so just free
	 * the nvmebuf.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
				  payload, ctxp->size);
	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
		    (nvmebuf != ctxp->rqb_buffer)) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		ctxp->rqb_buffer = NULL;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
				 "from %06x\n",
				 ctxp->oxid, ctxp->size, ctxp->sid);
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		atomic_inc(&tgtp->defer_fod);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		/*
		 * Post a replacement DMA buffer to the RQ and defer
		 * freeing the received buffer until the .defer_rcv callback.
		 */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}
	ctxp->flag &= ~LPFC_NVME_TNOTIFY;
	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 ctxp->oxid, ctxp->size, ctxp->sid);
	spin_lock_irqsave(&ctxp->ctxlock, iflags);
	lpfc_nvmet_defer_release(phba, ctxp);
	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
#endif
}

static void
lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf =
		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);

	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
#endif
}

static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
			     struct lpfc_nvmet_ctx_info *current_infop)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	struct lpfc_nvmet_ctx_info *get_infop;
	int i;

	/*
	 * The current_infop for the MRQ a NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU.
	 *
	 * First we need to pick a context list to start looking on.
	 * nvmet_ctx_start_cpu had available context the last time
	 * we needed to replenish this CPU where nvmet_ctx_next_cpu
	 * is just the next sequential CPU for this MRQ.
	 */
	if (current_infop->nvmet_ctx_start_cpu)
		get_infop = current_infop->nvmet_ctx_start_cpu;
	else
		get_infop = current_infop->nvmet_ctx_next_cpu;

	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
		if (get_infop == current_infop) {
			get_infop = get_infop->nvmet_ctx_next_cpu;
			continue;
		}
		spin_lock(&get_infop->nvmet_ctx_list_lock);

		/* Just take the entire context list, if there are any */
		if (get_infop->nvmet_ctx_list_cnt) {
			list_splice_init(&get_infop->nvmet_ctx_list,
					 &current_infop->nvmet_ctx_list);
			current_infop->nvmet_ctx_list_cnt =
				get_infop->nvmet_ctx_list_cnt - 1;
			get_infop->nvmet_ctx_list_cnt = 0;
			spin_unlock(&get_infop->nvmet_ctx_list_lock);

			current_infop->nvmet_ctx_start_cpu = get_infop;
			list_remove_head(&current_infop->nvmet_ctx_list,
					 ctx_buf, struct lpfc_nvmet_ctxbuf,
					 list);
			return ctx_buf;
		}

		/* Nothing found, move on to the next CPU */
		spin_unlock(&get_infop->nvmet_ctx_list_lock);
		get_infop = get_infop->nvmet_ctx_next_cpu;
	}

#endif
	/* Nothing found, all contexts for the MRQ are in-use */
	return NULL;
}
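/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 * @isr_timestamp: in jiffies.
 * @cqflag: cq processing information regarding workload.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * FCP command. An async exchange context is pulled from (or replenished into)
 * the per-CPU context list, initialized from the received FC header, and the
 * command is then handed to the nvmet-fc transport, either inline or via
 * deferred work depending on @cqflag.
 **/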
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    uint32_t idx,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp,
			    uint8_t cqflag)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_nvmet_ctx_info *current_infop;
	uint32_t size, oxid, sid, qno;
	unsigned long iflag;
	int current_cpu;

	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
		return;

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6157 NVMET FCP Drop IO\n");
		if (nvmebuf)
			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}

	/*
	 * Get a pointer to the context list for this MRQ based on
	 * the CPU this MRQ IRQ is associated with. If the CPU association
	 * changes from our initial assumption, the context list could
	 * be empty, thus it would need to be replenished with the
	 * context list from another CPU for this MRQ.
	 */
	current_cpu = raw_smp_processor_id();
	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
	if (current_infop->nvmet_ctx_list_cnt) {
		list_remove_head(&current_infop->nvmet_ctx_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		current_infop->nvmet_ctx_list_cnt--;
	} else {
		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
	}
	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
		if (idx != current_cpu)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6703 CPU Check rcv: "
					"cpu %d expect %d\n",
					current_cpu, idx);
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, raw_smp_processor_id());

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);

		atomic_inc(&tgtp->defer_ctx);
		return;
	}

	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (ctxp->state != LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->idx = idx;
	ctxp->state = LPFC_NVME_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = NULL;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (isr_timestamp)
		ctxp->ts_isr_cmd = isr_timestamp;
	ctxp->ts_cmd_nvme = 0;
	ctxp->ts_nvme_data = 0;
	ctxp->ts_data_wqput = 0;
	ctxp->ts_isr_data = 0;
	ctxp->ts_data_nvme = 0;
	ctxp->ts_nvme_status = 0;
	ctxp->ts_status_wqput = 0;
	ctxp->ts_isr_status = 0;
	ctxp->ts_status_nvme = 0;
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/* check for cq processing load */
	if (!cqflag) {
		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
		return;
	}

	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6325 Unable to queue work for oxid x%x. "
				"FCP Drop IO [x%x x%x x%x]\n",
				ctxp->oxid,
				atomic_read(&tgtp->rcv_fcp_cmd_in),
				atomic_read(&tgtp->rcv_fcp_cmd_out),
				atomic_read(&tgtp->xmt_fcp_release));

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
	}
}
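/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: in jiffies.
 * @cqflag: cq processing information regarding workload.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from
 * the SLI RQ on which the unsolicited event was received.
 **/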
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   uint32_t idx,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp,
			   uint8_t cqflag)
{
	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3167 NVMET FCP Drop IO\n");
		return;
	}
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
}
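/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a WQE for an NVME LS response
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine allocates an lpfc iocbq from the driver free list and
 * prepares an XMIT_SEQUENCE64 WQE that carries the LS response payload
 * described by @rspbuf/@rspsize back to the remote NPORT identified by
 * ctxp->sid on the exchange identified by ctxp->oxid.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 **/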
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_async_xchg_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}

static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	int i, cnt, nsegs;
	int do_pbde;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6107 NVMET prep FCP wqe: link err:"
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6108 NVMET prep FCP wqe: no ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				phba->cfg_nvme_seg_cnt);
		return NULL;
	}
	nsegs = rsp->sg_cnt;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Issue ABTS for this WQE based on iotag */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6110 NVMET prep FCP wqe: No "
					"WQE: NPORT x%x oxid x%x ste %d\n",
					ctxp->sid, ctxp->oxid, ctxp->state);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVME_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    (ctxp->state == LPFC_NVME_STE_DATA)) {
		wqe = &nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
				ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* From the tsend template, initialize words 7 - 11 */
		memcpy(&wqe->words[7],
		       &lpfc_tsend_cmd_template.words[7],
		       sizeof(uint32_t) * 5);

		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 5 */
		wqe->fcp_tsend.reserved = 0;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 - set ar later */

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 - set wqes later, in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 11 - set sup, irsp, irsplen later */
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */

			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
					bf_set(wqe_sup,
					       &wqe->fcp_tsend.wqe_com, 1);
			} else {
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
		}
		break;

	case NVMET_FCOP_WRITEDATA:
		/* From the treceive template, initialize words 3 - 11 */
		memcpy(&wqe->words[3],
		       &lpfc_treceive_cmd_template.words[3],
		       sizeof(uint32_t) * 9);

		/* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
		wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
		wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
		wqe->fcp_treceive.bde.addrLow = 0;
		wqe->fcp_treceive.bde.addrHigh = 0;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 - in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);

		/* Word 11 - check for pbde */
		if (phba->cfg_enable_pbde) {
			do_pbde = 1;
		} else {
			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
			do_pbde = 0;
		}

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* From the trsp template, initialize words 4 - 11 */
		memcpy(&wqe->words[4],
		       &lpfc_trsp_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		if (xc)
			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		/* In template wqes=0 irsp=0 irsplen=0 - good response */
		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
			/* Bad response - embed it */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_trsp.rsvd_12_15[0] = 0;

		/* Use rspbuf, NOT sg list */
		nsegs = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for_each_sg(rsp->sg, sgel, nsegs, i) {
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i+1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		if (i == 0) {
			bde = (struct ulp_bde64 *)&wqe->words[13];
			if (do_pbde) {
				/* Words 13-15 (PBDE) */
				bde->addrLow = sgl->addr_lo;
				bde->addrHigh = sgl->addr_hi;
				bde->tus.f.bdeSize =
					le32_to_cpu(sgl->sge_len);
				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bde->tus.w = cpu_to_le32(bde->tus.w);
			} else {
				memset(bde, 0, sizeof(struct ulp_bde64));
			}
		}
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVME_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}
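/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * cmds. The function frees memory resources used for the NVME commands.
 **/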
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVME_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVME_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;

	/* If the transport has released the context,
	 * it can be posted back for reuse now.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
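/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * cmds. The function frees memory resources used for the NVME commands.
 **/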
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t result;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, related io already complete */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVME_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVME_STE_DONE;
	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;

	/* If the transport has released the context,
	 * it can be posted back for reuse now.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
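/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for LS
 * cmds. The function frees memory resources used for the NVME commands.
 **/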
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	if (phba->nvmet_support) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_ls_abort_cmpl);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}

static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_async_xchg_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that we need to use.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
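/**
 * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
 * @pwqeq: Pointer to command iocb.
 * @xritag: Tag that uniquely identifies the local exchange resource.
 * @opt: Option bits -
 *	bit 0 = inhibit sending abts on the link
 *
 * This function is called with hbalock held.
 **/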
static void
lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
{
	union lpfc_wqe128 *wqe = &pwqeq->wqe;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(wqe, 0, sizeof(*wqe));

	if (opt & INHIBIT_ABORT)
		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
	/* Abort specified xri tag, with the mask deliberately zeroed */
	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);

	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* Abort the I/O associated with this outstanding exchange ID. */
	wqe->abort_cmd.wqe_com.abort_tag = xritag;

	/* iotag for the wqe completion. */
	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);

	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}

static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	u8 opt;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	ctxp->state = LPFC_NVME_STE_ABORT;
	opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}

static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_async_xchg_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVME_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

aerr:
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
			"(%x)\n",
			ctxp->oxid, rc, released);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}
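/**
 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
 *        via async rcv'd LS req.
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: pointer to the asynchronously received LS exchange context
 * @sid: address of the remote port to send the ABTS to
 * @xri: oxid value to for the ABTS (other side's exchange id).
 **/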
int
lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
	}

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	if (tgtp)
		atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 1;
}
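/**
 * lpfc_nvmet_invalidate_host
 *
 * @phba: pointer to the driver instance bound to an adapter port.
 * @ndlp: pointer to an lpfc_nodelist type
 *
 * This routine upcalls the nvmet transport to invalidate an NVME
 * host to which this target instance had active connections.
 */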
void
lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	u32 ndlp_has_hh;
	struct lpfc_nvmet_tgtport *tgtp;

	lpfc_printf_log(phba, KERN_INFO,
			LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
			"6203 Invalidating hosthandle x%px\n",
			ndlp);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);

	spin_lock_irq(&ndlp->lock);
	ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
	spin_unlock_irq(&ndlp->lock);

	/* Do not invalidate any nodes that do not have a hosthandle.
	 * The host_release callbk will cause a node reference
	 * count imbalance if a node is invalidated twice.
	 */
	if (!ndlp_has_hh) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
				"6204 Skip invalidate on node x%px DID x%x\n",
				ndlp, ndlp->nlp_DID);
		return;
	}

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	/* Already registered with nvmet, so invalidate this host */
	nvmet_fc_invalidate_host(phba->targetport, ndlp);
#endif
}