/*
 * lpfc_nvmet.c - NVME over Fibre Channel target (NVMET) support for the
 * Emulex lpfc Fibre Channel host bus adapter driver.
 */
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_async_xchg_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_async_xchg_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_async_xchg_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_async_xchg_ctx *,
					    uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_async_xchg_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;
/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* TSEND template */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is set by default, cleared for auto-rsp IOs */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */

	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */

	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
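
/*
 * The templates above are filled in once at attach time and then, on the
 * IO fast path, copied wholesale into a command's WQE before only the
 * per-command fields (BDE, xri_tag, reqtag, offsets, lengths) are
 * overwritten.  Illustrative pattern only - the actual per-command
 * fix-ups live in the lpfc_nvmet_prep_*_wqe routines:
 *
 *	memcpy(&wqe->fcp_tsend, &lpfc_tsend_cmd_template,
 *	       sizeof(union lpfc_wqe128));
 *	bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, sglq->sli4_xritag);
 *	bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
 *
 * This keeps the per-IO cost to one copy plus a handful of bf_set()
 * calls instead of rebuilding every WQE word from scratch.
 */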

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}

static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}
#endif

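/*
 * lpfc_nvmet_defer_release - move an exchange context to the ABTS list
 *
 * Called with ctxp->ctxlock held.  Marks the context for deferred release
 * and moves it from the target active list to the aborted-context list,
 * where it stays until the XRI abort completes and the context buffer can
 * be reposted.
 */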
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba,
			 struct lpfc_async_xchg_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVME_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVME_CTX_RLS;
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
 *         transmission of an NVME LS response.
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock
 * held.  It frees the memory resources used for the command that sent
 * the NVME LS RSP and invokes the transport's done() callback.
 **/
void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			   struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
	struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	result = wcqe->parameter;

	if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
				"6410 NVMEx LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
	}

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
			 axchg->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
			status, result, axchg->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	ls_rsp->done(ls_rsp);
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
			status, axchg->oxid);
	kfree(axchg);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock
 * held.  This function is the completion handler for NVME LS responses.
 * It updates the targetport statistics, then calls the generic
 * completion handler to free resources.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	if (!phba->targetport)
		goto finish;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

finish:
	__lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctx_buf: ctx buffer context
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVME_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVME_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned */
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_async_xchg_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;

	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to RSP is put on WQ.
	 * Segment 8 - Time from Driver WQ put is done to MSI-X
	 * ISR for RSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for RSP cmpl to
	 * RSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to RSP cmpl is passed to NVME Layer.
	 *
	 * Segments 1,2,3,4 and 5 are captured for the data phase.
	 * Segments 6,7,8,9 and 10 are captured for the RSP phase.
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;
	if (!seg6)
		goto out;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;
out:
	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock
 * held.  This function is the completion handler for NVME FCP commands.
 * It updates states and statistics, then notifies the transport that
 * the operation has completed.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_async_xchg_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVME_IO_INP;

	rsp = &ctxp->hdlrctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVME_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

		} else {
			ctxp->flag &= ~LPFC_NVME_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
				"XBUSY:x%x\n",
				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
				status, result, ctxp->flag);

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVME_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (ctxp->cpu != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6704 CPU Check cmdcmpl: "
					"cpu %d expect %d\n",
					id, ctxp->cpu);
	}
#endif
}

/**
 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
 *         an NVME LS rsp for a prior NVME LS request that was received.
 * @axchg: pointer to exchange context for the NVME LS request the response
 *         is for.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
 *
 * This routine is used to format and send a WQE to transmit a NVME LS
 * Response.  The response is for a prior NVME LS request that was
 * received and posted to the transport.
 *
 * Returns:
 *  0 : if response successfully transmitted
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
int
__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
		       struct nvmefc_ls_rsp *ls_rsp,
		       void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
					      struct lpfc_iocbq *cmdwqe,
					      struct lpfc_wcqe_complete *wcqe))
{
	struct lpfc_hba *phba = axchg->phba;
	struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);

	if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
				"6412 NVMEx LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
		return -EALREADY;
	}
	axchg->state = LPFC_NVME_STE_LS_RSP;
	axchg->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
					  ls_rsp->rsplen);
	if (nvmewqeq == NULL) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_NVME_DISC | LOG_NVME_IOERR | LOG_NVME_ABTS,
				"6150 NVMEx LS Drop Rsp x%x: Prep\n",
				axchg->oxid);
		rc = -ENOMEM;
		goto out_free_buf;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = ls_rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);
	/*
	 * Note: although we're using stack space for the dmabuf, the
	 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
	 * be referenced after it returns back to this routine.
	 */

	nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = axchg;

	lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
			 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);

	/* clear to be sure there's no reference */
	nvmewqeq->context3 = NULL;

	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return 0;
	}

	lpfc_printf_log(phba, KERN_ERR,
			LOG_NVME_DISC | LOG_NVME_IOERR | LOG_NVME_ABTS,
			"6151 NVMEx LS RSP x%x: failed to transmit %d\n",
			axchg->oxid, rc);

	rc = -ENXIO;

	lpfc_nlp_put(nvmewqeq->context1);

out_free_buf:
	/* Give back resources */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	/*
	 * As transport doesn't track completions of responses, if the rsp
	 * fails to send, the transport will effectively ignore the rsp
	 * and consider the LS done. However, the driver has an active
	 * exchange open for the LS - so be sure to abort the exchange
	 * if the response isn't sent.
	 */
	lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
	return rc;
}

/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
 * @tgtport: pointer to target port that the NVME LS is to be transmit from.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 *
 * Driver registers this routine to transmit responses for received NVME
 * LS requests.
 *
 * This routine is used to format and send a WQE to transmit a NVME LS
 * Response.  The ls_rsp is used to reverse-map the LS to the original
 * NVME LS request sequence, which provides addressing information for
 * the remote port the LS is to be sent to, as well as the exchange id
 * that the LS is bound to.
 *
 * Returns:
 *  0 : if response successfully transmitted
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);

	if (rc) {
		atomic_inc(&nvmep->xmt_ls_drop);
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&nvmep->xmt_ls_abort);
		return rc;
	}

	atomic_inc(&nvmep->xmt_ls_rsp);
	return 0;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		if (rsp->hwqid != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVME_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO oxid x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVME_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
		wq = ctxp->hdwq->io_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}

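/*
 * lpfc_nvmet_targetport_delete - nvmet transport callback invoked when the
 * last reference on the targetport is dropped.  Wakes any thread waiting
 * in lpfc_nvmet_destroy_targetport() for the unregister to complete.
 */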
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}

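/*
 * lpfc_nvmet_xmt_fcp_abort - nvmet transport entry point to abort an FCP
 * exchange.  Issues an unsolicited or solicited abort WQE depending on
 * whether any IO WQEs have been started on the exchange, and flushes the
 * WQ-full list if the IO was still queued there.
 */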
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVME_ABORT_OP;

	if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->io_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* A state of LPFC_NVME_STE_RCV means we have just received
	 * the NVME command and have not started processing it.
	 * (by issuing any IO WQEs on this exchange yet)
	 */
	if (ctxp->state == LPFC_NVME_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}

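/*
 * lpfc_nvmet_xmt_fcp_release - nvmet transport entry point invoked when the
 * transport is done with an IO.  Either reposts the context buffer
 * immediately, or defers the release if an abort or exchange-busy
 * condition is still outstanding.
 */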
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVME_STE_DONE &&
		 ctxp->state != LPFC_NVME_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVME_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
			 ctxp->oxid, ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
	ctxp->flag &= ~LPFC_NVME_TNOTIFY;

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

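/*
 * lpfc_nvmet_defer_rcv - nvmet transport callback invoked once a previously
 * deferred FCP command (-EOVERFLOW from nvmet_fc_rcv_fcp_req) has been
 * accepted.  Frees the deferred receive buffer, since a replacement buffer
 * was already reposted to the RQ.
 */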
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, raw_smp_processor_id());

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer oxid x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}

/**
 * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
 * @phba: Pointer to HBA context object
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the completion handler for NVME LS requests.
 * The function updates any states and statistics, then calls the
 * generic completion handler to finish completion of the request.
 **/
static void
lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		      struct lpfc_wcqe_complete *wcqe)
{
	__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ls_req - Issue an Link Service request
 * @targetport: pointer to target instance registered with nvmet transport.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *               The driver sets this value as the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
		  void *hosthandle,
		  struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;
	u32 hstate;

	if (!lpfc_nvmet)
		return -EINVAL;

	phba = lpfc_nvmet->phba;
	if (phba->pport->load_flag & FC_UNLOADING)
		return -EINVAL;

	hstate = atomic_read(&lpfc_nvmet->state);
	if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
		return -EACCES;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
				 lpfc_nvmet_ls_req_cmp);

	return ret;
}

/**
 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
 * @targetport: Transport targetport, that LS was issued from.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *               The driver sets this value as the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
		    void *hosthandle,
		    struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;

	phba = lpfc_nvmet->phba;
	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lpfc_nvmet->xmt_ls_abort);
}

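/*
 * lpfc_nvmet_host_release - nvmet transport callback invoked when the
 * transport drops its reference on a hosthandle (the driver's ndlp).
 * Clears the invalid-host state on the targetport.
 */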
static void
lpfc_nvmet_host_release(void *hosthandle)
{
	struct lpfc_nodelist *ndlp = hosthandle;
	struct lpfc_hba *phba = NULL;
	struct lpfc_nvmet_tgtport *tgtp;

	phba = ndlp->phba;
	if (!phba->targetport || !phba->targetport->private)
		return;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6202 NVMET XPT releasing hosthandle x%px\n",
			hosthandle);
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_set(&tgtp->state, 0);
}

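/*
 * lpfc_nvmet_discovery_event - nvmet transport callback invoked on a
 * discovery-subsystem change.  Signals remote ports via an RSCN.
 */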
static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_hba *phba;
	uint32_t rc;

	tgtp = tgtport->private;
	phba = tgtp->phba;

	rc = lpfc_issue_els_rscn(phba->pport, 0);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6420 NVMET subsystem change: Notification %s\n",
			(rc) ? "Failed" : "Sent");
}
1402
1403static struct nvmet_fc_target_template lpfc_tgttemplate = {
1404 .targetport_delete = lpfc_nvmet_targetport_delete,
1405 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
1406 .fcp_op = lpfc_nvmet_xmt_fcp_op,
1407 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
1408 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1409 .defer_rcv = lpfc_nvmet_defer_rcv,
1410 .discovery_event = lpfc_nvmet_discovery_event,
1411 .ls_req = lpfc_nvmet_ls_req,
1412 .ls_abort = lpfc_nvmet_ls_abort,
1413 .host_release = lpfc_nvmet_host_release,
1414
1415 .max_hw_queues = 1,
1416 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1417 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1418 .dma_boundary = 0xFFFFFFFF,
1419
1420
1421 .target_features = 0,
1422
1423 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1424 .lsrqst_priv_sz = 0,
1425};
1426
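/*
 * __lpfc_nvmet_clean_io_for_cpu - free every context buffer on one
 * per-CPU/MRQ context list: release its XRI back to the NVMET sgl list,
 * then free its iocbq and the context itself.
 */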
static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}

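/*
 * lpfc_nvmet_setup_io_context - allocate the per-CPU/per-MRQ context lists
 * and one context buffer (WQE, sglq/XRI, deferred-work item) for every
 * NVMET XRI the adapter exposes.
 *
 * Returns 0 on success, -ENOMEM if any allocation fails.
 */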
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx, cpu;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
	 *
	 * Each line represents an MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ N is initially assumed to be associated with CPU N, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
	for_each_possible_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba,
					       cpumask_first(cpu_present_mask),
					       j);
		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	cpu = cpumask_first(cpu_present_mask);
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVME_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = &nvmewqe->wqe;

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, cpu, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq) {
			idx = 0;
			cpu = cpumask_first(cpu_present_mask);
			continue;
		}
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu == nr_cpu_ids)
			cpu = cpumask_first(cpu_present_mask);
	}

	for_each_present_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu x%px\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
		}
	}
	return 0;
}

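/*
 * lpfc_nvmet_create_targetport - allocate the NVMET IO context resources
 * and register this port with the nvmet-fc transport.  On success the
 * tgtport private area is initialized and all statistic counters zeroed.
 */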
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: x%px, private x%px "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->defer_ctx, 0);
		atomic_set(&tgtp->defer_fod, 0);
		atomic_set(&tgtp->defer_wqfull, 0);
	}
	return error;
}

int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port x%px did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *req = NULL;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_lock(&ctxp->ctxlock);
		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVME_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
			list_del_init(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVME_XBUSY;
		spin_unlock(&ctxp->ctxlock);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
				ctxp->ctxbuf->sglq->sli4_lxritag,
				rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid x%x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
	if (ctxp) {
		/*
		 * Abort already done by FW, so BA_ACC sent.
		 * However, the transport may be unaware.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
				"flag x%x oxid x%x rxid x%x\n",
				xri, ctxp->state, ctxp->flag, ctxp->oxid,
				rxid);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_ABTS_RCV;
		ctxp->state = LPFC_NVME_STE_ABORT;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		req = &ctxp->hdlrctx.fcp_req;
		if (req)
			nvmet_fc_rcv_fcp_abort(phba->targetport, req);
	}
#endif
}

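/*
 * lpfc_nvmet_rcv_unsol_abort - process an unsolicited ABTS received for an
 * NVMET exchange.  Searches, in order, the aborted-context list, the IO
 * wait list and the active list for a matching oxid/sid; responds with
 * BA_ACC if a match is found and cleaned up, otherwise with BA_RJT.
 */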
int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint32_t sid;
	uint16_t oxid, xri;
	unsigned long iflag = 0;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->hdlrctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* check the wait list */
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		struct rqb_dmabuf *nvmebuf;
		struct fc_frame_header *fc_hdr_tmp;
		u32 sid_tmp;
		u16 oxid_tmp;
		bool found = false;

		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

		/* match by oxid and s_id */
		list_for_each_entry(nvmebuf,
				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
				    hbuf.list) {
			fc_hdr_tmp = (struct fc_frame_header *)
				(nvmebuf->hbuf.virt);
			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
			if (oxid_tmp != oxid || sid_tmp != sid)
				continue;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6321 NVMET Rcv ABTS oxid x%x from x%x "
					"is waiting for a ctxp\n",
					oxid, sid);

			list_del_init(&nvmebuf->hbuf.list);
			phba->sli4_hba.nvmet_io_wait_cnt--;
			found = true;
			break;
		}
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* free buffer since already posted a new DMA buffer to RQ */
		if (found) {
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
			/* Respond with BA_ACC accordingly */
			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
			return 0;
		}
	}

	/* check active list */
	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
	if (ctxp) {
		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
				"flag x%x state x%x\n",
				ctxp->oxid, xri, ctxp->flag, ctxp->state);

		if (ctxp->flag & LPFC_NVME_TNOTIFY) {
			/* Notify the transport */
			nvmet_fc_rcv_fcp_abort(phba->targetport,
					       &ctxp->hdlrctx.fcp_req);
		} else {
			cancel_work_sync(&ctxp->ctxbuf->defer_work);
			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
			 oxid, raw_smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}

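/*
 * lpfc_nvmet_wqfull_flush - flush WQEs parked on a WQ's wqfull_list.
 * If @ctxp is non-NULL only that IO is flushed, otherwise the whole list
 * is drained.  Each flushed WQE is completed with a faked
 * IOSTAT_LOCAL_REJECT/IOERR_ABORT_REQUESTED CQE.
 */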
static void
lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
			struct lpfc_async_xchg_ctx *ctxp)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_iocbq *next_nvmewqeq;
	unsigned long iflags;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep;

	pring = wq->pring;
	wcqep = &wcqe;

	/* Fake an ABORT error code back to cmpl routine */
	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
	wcqep->parameter = IOERR_ABORT_REQUESTED;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
				 &wq->wqfull_list, list) {
		if (ctxp) {
			/* Checking for a specific IO to flush */
			if (nvmewqeq->context2 == ctxp) {
				list_del(&nvmewqeq->list);
				spin_unlock_irqrestore(&pring->ring_lock,
						       iflags);
				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
							  wcqep);
				return;
			}
			continue;
		} else {
			/* Flush all IOs */
			list_del(&nvmewqeq->list);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
			spin_lock_irqsave(&pring->ring_lock, iflags);
		}
	}
	if (!ctxp)
		wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
}

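/*
 * lpfc_nvmet_wqfull_process - called when WQE slots free up on a WQ that
 * previously overflowed; re-issues WQEs from the wqfull_list until the
 * list empties or the WQ fills again.
 */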
void
lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
			  struct lpfc_queue *wq)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflags;
	int rc;

	/*
	 * Some WQE slots are available, so try to re-issue anything
	 * on the WQ wqfull_list.
	 */
	pring = wq->pring;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	while (!list_empty(&wq->wqfull_list)) {
		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
				 list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
		spin_lock_irqsave(&pring->ring_lock, iflags);
		if (rc == -EBUSY) {
			/* WQ was full again, so put it back on the list */
			list_add(&nvmewqeq->list, &wq->wqfull_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return;
		}
		if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
			if (ctxp->ts_cmd_nvme) {
				if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
					ctxp->ts_status_wqput = ktime_get_ns();
				else
					ctxp->ts_data_wqput = ktime_get_ns();
			}
#endif
		} else {
			WARN_ON(rc);
		}
	}
	wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

#endif
}

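/*
 * lpfc_nvmet_destroy_targetport - unregister this port from the nvmet-fc
 * transport.  Flushes any WQ-full lists, waits (with timeout) for the
 * transport to drop its final targetport reference, then frees the NVMET
 * IO context resources.
 */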
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_queue *wq;
	uint32_t qidx;
	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			wq = phba->sli4_hba.hdwq[qidx].io_wq;
			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
		}
		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
		nvmet_fc_unregister_targetport(phba->targetport);
		if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
					msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6179 Unreg targetport x%px timeout "
					"reached.\n", phba->targetport);
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}

/**
 * lpfc_nvmet_handle_lsreq - Process an NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request.  Any remaining validation is done and the LS is then forwarded
 * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
 * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if LS was handled and processed, non-zero otherwise.
 **/
int
lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
	uint32_t *payload = axchg->payload;
	int rc;

	atomic_inc(&tgtp->rcv_ls_req_in);

	/*
	 * Driver passes the ndlp as the hosthandle argument, allowing
	 * the transport to generate LS requests for any associations
	 * assigned to the ndlp.
	 */
	rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
				 axchg->payload, axchg->size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", axchg->size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (!rc) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return 0;
	}

	atomic_inc(&tgtp->rcv_ls_req_drop);
#endif
	return 1;
}
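
/*
 * Note: a return of 1 above tells the caller the LS was not delivered to
 * the nvmet-fc transport, so the caller remains responsible for failing
 * the exchange (typically by issuing an ABTS for it); no response WQE has
 * been built at that point.
 */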

static void
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_hba *phba = ctxp->phba;
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t *payload, qno;
	uint32_t rc;
	unsigned long iflags;

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6159 process_rcv_fcp_req, nvmebuf is NULL, "
				"oxid: x%x flg: x%x state: x%x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		return;
	}

	if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6324 IO oxid x%x aborted\n",
				ctxp->oxid);
		return;
	}

	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	ctxp->flag |= LPFC_NVME_TNOTIFY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_isr_cmd)
		ctxp->ts_cmd_nvme = ktime_get_ns();
#endif
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
	 * in the NVME command / FC header has been stored, so the
	 * nvmebuf can be released once the transport owns the command.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
				  payload, ctxp->size);
	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
		    (nvmebuf != ctxp->rqb_buffer)) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		ctxp->rqb_buffer = NULL;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}

	/* Processing of the FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
				 "from %06x\n",
				 ctxp->oxid, ctxp->size, ctxp->sid);
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		atomic_inc(&tgtp->defer_fod);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		/*
		 * Post a replacement DMA buffer to the RQ; freeing of the
		 * received buffer is deferred until the transport is done
		 * with it.
		 */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}
	ctxp->flag &= ~LPFC_NVME_TNOTIFY;
	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 ctxp->oxid, ctxp->size, ctxp->sid);
	spin_lock_irqsave(&ctxp->ctxlock, iflags);
	lpfc_nvmet_defer_release(phba, ctxp);
	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
#endif
}
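
/*
 * Note: nvmet_fc_rcv_fcp_req() has three relevant outcomes here: 0 means
 * the transport owns the command and the RQ buffer can be freed (or is
 * already being reused for the response WQE); -EOVERFLOW means the
 * transport queued the command and the buffer must stay pinned until the
 * deferred receive completes, so only a replacement RQ buffer is posted;
 * any other error drops the IO and aborts the exchange.
 */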

static void
lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf =
		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);

	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
#endif
}
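
/*
 * Note: this work item is queued from lpfc_nvmet_unsol_fcp_buffer() when
 * @cqflag indicates the CQ is under heavy load, moving the upcall into
 * the nvmet-fc transport off the completion-queue processing path.
 */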

static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
			     struct lpfc_nvmet_ctx_info *current_infop)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	struct lpfc_nvmet_ctx_info *get_infop;
	int i;

	/*
	 * The current_infop for the MRQ a NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context list
	 * from another CPU's list.
	 *
	 * First we need to pick a context list to start looking at.
	 * nvmet_ctx_start_cpu had available context the last time we
	 * needed to replenish this CPU, where nvmet_ctx_next_cpu is
	 * just the next sequential CPU for this MRQ.
	 */
	if (current_infop->nvmet_ctx_start_cpu)
		get_infop = current_infop->nvmet_ctx_start_cpu;
	else
		get_infop = current_infop->nvmet_ctx_next_cpu;

	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
		if (get_infop == current_infop) {
			get_infop = get_infop->nvmet_ctx_next_cpu;
			continue;
		}
		spin_lock(&get_infop->nvmet_ctx_list_lock);

		/* Just take the entire context list, if there is one */
		if (get_infop->nvmet_ctx_list_cnt) {
			list_splice_init(&get_infop->nvmet_ctx_list,
					 &current_infop->nvmet_ctx_list);
			current_infop->nvmet_ctx_list_cnt =
				get_infop->nvmet_ctx_list_cnt - 1;
			get_infop->nvmet_ctx_list_cnt = 0;
			spin_unlock(&get_infop->nvmet_ctx_list_lock);

			current_infop->nvmet_ctx_start_cpu = get_infop;
			list_remove_head(&current_infop->nvmet_ctx_list,
					 ctx_buf, struct lpfc_nvmet_ctxbuf,
					 list);
			return ctx_buf;
		}

		/* Nothing found, move on to the next CPU */
		spin_unlock(&get_infop->nvmet_ctx_list_lock);
		get_infop = get_infop->nvmet_ctx_next_cpu;
	}

#endif
	/* Nothing found, all contexts for this MRQ are in use */
	return NULL;
}
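
/*
 * Note: receive contexts are kept on per-CPU free lists so the hot
 * receive path can allocate without cross-CPU lock contention. The
 * replenish above deliberately steals a victim CPU's entire list in one
 * splice rather than a single entry, which amortizes the lock cost, and
 * it remembers the donor in nvmet_ctx_start_cpu to bias the next search.
 */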

/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited FCP command buffer
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of the MRQ vector
 * @nvmebuf: pointer to the lpfc nvme command RQ data structure.
 * @isr_timestamp: timestamp taken in the ISR, in ns.
 * @cqflag: cq processing information regarding workload.
 *
 * This routine is used for processing an unsolicited FCP command that was
 * received on an NVMET MRQ. It allocates a receive context for the
 * exchange, initializes it from the FC header, and either processes the
 * command inline or defers it to a work queue element based on @cqflag.
 */
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    uint32_t idx,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp,
			    uint8_t cqflag)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_nvmet_ctx_info *current_infop;
	uint32_t size, oxid, sid, qno;
	unsigned long iflag;
	int current_cpu;

	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
		return;

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6157 NVMET FCP Drop IO\n");
		if (nvmebuf)
			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}

	/*
	 * Get a pointer to the context list for this MRQ based on
	 * the CPU this MRQ IRQ is associated with. If the CPU association
	 * changes from our initial assumption, the context list could
	 * be empty, thus it would need to be replenished with the
	 * context list from another CPU for this MRQ.
	 */
	current_cpu = raw_smp_processor_id();
	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
	if (current_infop->nvmet_ctx_list_cnt) {
		list_remove_head(&current_infop->nvmet_ctx_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		current_infop->nvmet_ctx_list_cnt--;
	} else {
		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
	}
	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
		if (idx != current_cpu)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6703 CPU Check rcv: "
					"cpu %d expect %d\n",
					current_cpu, idx);
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, raw_smp_processor_id());

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);

		atomic_inc(&tgtp->defer_ctx);
		return;
	}

	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (ctxp->state != LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->idx = idx;
	ctxp->state = LPFC_NVME_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = NULL;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (isr_timestamp)
		ctxp->ts_isr_cmd = isr_timestamp;
	ctxp->ts_cmd_nvme = 0;
	ctxp->ts_nvme_data = 0;
	ctxp->ts_data_wqput = 0;
	ctxp->ts_isr_data = 0;
	ctxp->ts_data_nvme = 0;
	ctxp->ts_nvme_status = 0;
	ctxp->ts_status_wqput = 0;
	ctxp->ts_isr_status = 0;
	ctxp->ts_status_nvme = 0;
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/* check for cq processing load */
	if (!cqflag) {
		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
		return;
	}

	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6325 Unable to queue work for oxid x%x. "
				"FCP Drop IO [x%x x%x x%x]\n",
				ctxp->oxid,
				atomic_read(&tgtp->rcv_fcp_cmd_in),
				atomic_read(&tgtp->rcv_fcp_cmd_out),
				atomic_read(&tgtp->xmt_fcp_release));

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
	}
}
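
/*
 * Note: when no receive context is available above, the command is not
 * dropped; the RQ buffer is parked on lpfc_nvmet_io_wait_list and a fresh
 * buffer is posted to the MRQ. The waiting frame is picked back up when a
 * context is freed (see lpfc_nvmet_ctxbuf_post() earlier in this file).
 */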

/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of the MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: timestamp taken in the ISR, in ns.
 * @cqflag: cq processing information regarding workload.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from
 * the SLI RQ on which the unsolicited event was received.
 */
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   uint32_t idx,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp,
			   uint8_t cqflag)
{
	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"3167 NVMET FCP Drop IO\n");
		return;
	}
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
}

/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for the NVME LS request.
 * @rspbuf: DMA address of the NVME LS response payload.
 * @rspsize: size, in bytes, of the NVME LS response payload.
 *
 * This routine allocates a lpfc-WQE data structure from the driver's
 * free iocb list and prepares it as an XMIT_SEQUENCE64 WQE that will
 * carry the LS response payload back to the remote NPORT that sent the
 * LS request. The response payload is referenced through a single
 * 64-bit Buffer Descriptor Entry (BDE) built from @rspbuf/@rspsize.
 *
 * The routine validates that the link is up and that a usable ndlp
 * (remote node context) exists for the source N_Port ID of the original
 * request before committing any resources.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 */
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_async_xchg_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
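
/*
 * Note: the LS response is sent with a generic XMIT_SEQUENCE64 WQE rather
 * than a dedicated LS response command: R_CTL/TYPE in word 5 mark the
 * sequence as an ELS4 reply carrying FC-NVME, the single BDE in words 0-2
 * points at the response payload, and wqe_rcvoxid ties the sequence back
 * to the originator's OX_ID so it completes the LS exchange.
 */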

static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	int i, cnt, nsegs;
	int do_pbde;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6107 NVMET prep FCP wqe: link err: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6108 NVMET prep FCP wqe: no ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				phba->cfg_nvme_seg_cnt);
		return NULL;
	}
	nsegs = rsp->sg_cnt;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6110 NVMET prep FCP wqe: No "
					"WQE: NPORT x%x oxid x%x ste %d\n",
					ctxp->sid, ctxp->oxid, ctxp->state);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0;
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVME_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    (ctxp->state == LPFC_NVME_STE_DATA)) {
		wqe = &nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
				ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* From the tsend template, initialize words 7 - 11 */
		memcpy(&wqe->words[7],
		       &lpfc_tsend_cmd_template.words[7],
		       sizeof(uint32_t) * 5);

		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 5 */
		wqe->fcp_tsend.reserved = 0;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 - set ar later */

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 - in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 11 - set sup, irsp, irsplen later */
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */

			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
					bf_set(wqe_sup,
					       &wqe->fcp_tsend.wqe_com, 1);
			} else {
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			/* In template ar=1; clear it for a pure data send */
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
		}
		break;

	case NVMET_FCOP_WRITEDATA:
		/* From the treceive template, initialize words 3 - 11 */
		memcpy(&wqe->words[3],
		       &lpfc_treceive_cmd_template.words[3],
		       sizeof(uint32_t) * 9);

		/* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
		wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
		wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
		wqe->fcp_treceive.bde.addrLow = 0;
		wqe->fcp_treceive.bde.addrHigh = 0;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 - in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);

		/* Word 11 - check for pbde */
		if (phba->cfg_enable_pbde) {
			/* PBDE (words 13-15) is filled in below */
			do_pbde = 1;
		} else {
			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
			do_pbde = 0;
		}

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* From the trsp template, initialize words 4 - 11 */
		memcpy(&wqe->words[4],
		       &lpfc_trsp_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		if (xc)
			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		/* In template wqes=0 irsp=0 irsplen=0 - good response */
		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
			/* Bad response - embed it */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_trsp.rsvd_12_15[0] = 0;

		/* Use rspbuf, NOT sg list */
		nsegs = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for_each_sg(rsp->sg, sgel, nsegs, i) {
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i + 1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		if (i == 0) {
			bde = (struct ulp_bde64 *)&wqe->words[13];
			if (do_pbde) {
				/* Words 13 - 15 (PBDE) */
				bde->addrLow = sgl->addr_lo;
				bde->addrHigh = sgl->addr_hi;
				bde->tus.f.bdeSize =
					le32_to_cpu(sgl->sge_len);
				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bde->tus.w = cpu_to_le32(bde->tus.w);
			} else {
				memset(bde, 0, sizeof(struct ulp_bde64));
			}
		}
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVME_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}
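
/*
 * Note on the op mapping above: NVMET_FCOP_READDATA(_RSP) builds an
 * FCP_TSEND64 (data to the initiator, optionally with the response
 * embedded), NVMET_FCOP_WRITEDATA builds an FCP_TRECEIVE64 (solicits
 * write data), and NVMET_FCOP_RSP builds an FCP_TRSP64. The first two
 * SGL entries are marked SKIP for TSEND/TRECEIVE and data SGEs start at
 * the third entry, presumably matching the SGL layout the SLI-4 port
 * expects for target-mode exchanges.
 */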

/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for an NVME ABTS of a
 * solicited FCP command and frees the resources used for the command.
 */
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVME_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVME_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;

	/* If the transport has released the ctx, it can be reposted now.
	 * Otherwise it is recycled by the transport's release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
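
/*
 * Note: the receive context is only reposted here when the transport has
 * already asked for its release (LPFC_NVME_CTX_RLS) and the hardware no
 * longer holds the exchange (LPFC_NVME_XBUSY clear). If either condition
 * is still outstanding, the later release call or XRI_ABORTED event
 * performs the repost instead, so the buffer is never returned to the
 * free list twice.
 */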

/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for an NVME ABTS of an
 * unsolicited FCP command and frees the resources used for the command.
 */
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t result;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, the related io already completed */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVME_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVME_STE_DONE;
	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;

	/* If the transport has released the ctx, it can be reposted now.
	 * Otherwise it is recycled by the transport's release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for an NVME ABTS of an LS
 * command and frees the resources used for the command.
 */
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	if (phba->nvmet_support) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_ls_abort_cmpl);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}

static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_async_xchg_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE
	 * fields that we want.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
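
/*
 * Note: this routine transmits the ABTS as a raw basic link service
 * sequence (FC_RCTL_BA_ABTS / FC_TYPE_BLS) through an XMIT_SEQUENCE64
 * WQE rather than an ABORT WQE, since for an unsolicited exchange there
 * appears to be no driver-owned XRI to abort locally; wqe_rcvoxid carries
 * the remote side's OX_ID to identify the exchange being aborted.
 */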

static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	u8 opt;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	ctxp->state = LPFC_NVME_STE_ABORT;
	opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}
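
/*
 * Note: phba->hbalock is held from the HBA_IOQ_FLUSH test through
 * lpfc_sli4_issue_wqe() so the abort cannot race with reset cleanup
 * flushing the IO queues. In each early-out leg LPFC_NVME_ABORT_OP is
 * cleared under ctxlock so completion/release accounting stays
 * consistent, and 0 is returned since dropping the ABTS is not treated
 * as a failure of the original exchange.
 */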

static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_async_xchg_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVME_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

aerr:
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
			"(%x)\n",
			ctxp->oxid, rc, released);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}

/**
 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
 *        via async frame receive where the frame is not handled.
 * @phba: pointer to adapter structure
 * @ctxp: pointer to the asynchronously received sequence
 * @sid: address of the remote port to send the ABTS to
 * @xri: oxid value for the ABTS (the other side's exchange id).
 */
int
lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
	}

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	if (tgtp)
		atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 1;
}
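
/*
 * Note on ctxp ownership in the LS abort path: if no iocbq can be
 * allocated the context is freed right here, otherwise it stays attached
 * to the abort WQE as context2 and is freed by
 * lpfc_nvmet_xmt_ls_abort_cmp() when the ABTS completes.
 */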

/**
 * lpfc_nvmet_invalidate_host
 * @phba: pointer to the driver instance bound to an adapter port.
 * @ndlp: pointer to an lpfc_nodelist type
 *
 * This routine upcalls the nvmet transport to invalidate an NVME
 * host to which this target instance had active connections.
 */
void
lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_nvmet_tgtport *tgtp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS,
			"6203 Invalidating hosthandle x%px\n",
			ndlp);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	/* The ndlp is used as the hosthandle registered with nvmet-fc */
	nvmet_fc_invalidate_host(phba->targetport, ndlp);
#endif
}