// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = 0;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		    "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		    "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
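
	/* Map the admin queue and the first I/O queue to hardware queue index 0. */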
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}
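
	/* Without extra queue pairs, every hardware queue uses the base qpair. */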
	if (!ha->max_qpairs) {
		qpair = ha->base_qpair;
	} else {
		if (ha->queue_pair_map[qidx]) {
			*handle = ha->queue_pair_map[qidx];
			ql_log(ql_log_info, vha, 0x2121,
			    "Returning existing qpair of %p for idx=%x\n",
			    *handle, qidx);
			return 0;
		}

		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
		if (!qpair) {
			ql_log(ql_log_warn, vha, 0x2122,
			    "Failed to allocate qpair\n");
			return -EINVAL;
		}
	}
	*handle = qpair;

	return 0;
}
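
/*
 * Final kref release for an FCP command: detach the SRB from the transport
 * request, fill in the completion status, call the transport's ->done()
 * and return the SRB to its queue pair.
 */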
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
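
/* Final kref release for an NVMe LS request: detach the SRB and complete the request. */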
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}
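
/*
 * Worker that issues the actual abort for an outstanding NVMe command or
 * LS request.  A reference on the SRB was taken before this work was
 * scheduled; it is dropped here unless the ABTS completion path will do it.
 */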
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval, abts_done_called = 1;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);
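
	/*
	 * With asynchronous TMF handling enabled, an ABTS completion is
	 * expected only when the abort was accepted (QLA_SUCCESS) or
	 * rejected by firmware (QLA_ERR_FROM_FW); for any other failure
	 * no completion IOCB will arrive for this command.
	 */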
	if (ql2xasynctmfenable &&
	    rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
		abts_done_called = 0;
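
	/*
	 * If an ABTS completion is expected and the driver is waiting for it
	 * (ql2xabts_wait_nvme), keep the command reference; it is dropped by
	 * qla_wait_nvme_release_cmd_kref() when the ABTS status IOCB arrives.
	 */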
	if (abts_done_called && ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
		return;
out:
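	/* Drop the reference taken when this abort work was scheduled. */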
	kref_put(&sp->cmd_kref, sp->put_fn);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;
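
	/* Allocate an SRB to carry the LS request. */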
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	int rval = QLA_SUCCESS;
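
	/* Set up the request queue for this qpair and the S/G segment count. */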
	req = qpair->req;
	tot_dsds = fd->sg_cnt;
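
	/* Acquire the qpair-specific lock while manipulating the request ring. */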
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
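
	/*
	 * Check for enough free request-ring entries, refreshing the cached
	 * count from the shadow out pointer or the hardware register if needed.
	 */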
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}
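
	/* Track Asynchronous Event Requests submitted on the admin queue. */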
	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}
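
	/* Build the command packet. */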
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);
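
	/* Zero out the remaining portion of the packet. */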
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	cmd_pkt->entry_type = COMMAND_NVME;
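
	/* Set the data direction and per-qpair transfer counters for this request. */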
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
		     NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
				    cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	if (sp->fcport->edif.enable && fd->io_dir != 0)
		cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}
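
	/* Set the N_Port handle, destination FC address and VP index. */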
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
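
	/* NVMe response IU buffer. */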
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);
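
	/* NVMe command IU buffer. */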
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
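
	/* One data segment descriptor (DSD) is available in the NVME command IOCB itself. */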
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;
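
	/* Load the data segments. */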
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;
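
		/* Allocate an additional continuation packet? */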
		if (avail_dsds == 0) {
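			/*
			 * The current IOCB is out of DSD slots; advance the
			 * ring and chain a Continuation Type 1 IOCB for the
			 * remaining segments.
			 */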
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
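
	/* Set the total entry count and make the IOCB visible before ringing the doorbell. */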
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
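
	/* Adjust the ring index. */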
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
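
	/* AEN requests are not counted against the qpair's outstanding commands. */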
	if (!nvme->u.nvme.aen_op)
		sp->qpair->cmd_cnt++;
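
	/* Set the chip's new request-queue in pointer. */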
	wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}

static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
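		/* The NVMe association has already been torn down. */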
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;
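
	/*
	 * While the remote port is being reset, return busy so the NVMe FC
	 * transport stalls and retries the I/O instead of failing it.
	 */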
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;
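
	/* Allocate an SRB from the qpair for this FCP command. */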
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	sp->cmd_sp = sp;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.max_hw_queues = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, fcport->vha, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
	    (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}

void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	struct qla_hw_data *ha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	ha = orig_sp->fcport->vha->hw;

	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
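
	/* Enable the ABTS retry-count option for this Abort IOCB. */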
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
	abt->drv.abts_rty_cnt = cpu_to_le16(2);
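
	/* Enable the response-timeout option; the timeout is derived from R_A_TOV. */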
	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	u16 comp_status;
	struct scsi_qla_host *vha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	vha = orig_sp->fcport->vha;

	comp_status = le16_to_cpu(abt->comp_status);
	switch (comp_status) {
	case CS_RESET:
	case CS_ABORTED:
	case CS_TIMEOUT:
	case CS_PORT_UNAVAILABLE:
	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
		ql_dbg(ql_dbg_async, vha, 0xf09d,
		    "Abort I/O IOCB completed with error, comp_status=%x\n",
		    comp_status);
		break;

	case CS_REJECT_RECEIVED:
		ql_dbg(ql_dbg_async, vha, 0xf09e,
		    "BA_RJT was received for the ABTS rjt_vendorUnique = %u\n",
		    abt->fw.ba_rjt_vendorUnique);
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		    "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
		    abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
		break;

	case CS_COMPLETE:
		ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
		    "IOCB request is completed successfully comp_status=%x\n",
		    comp_status);
		break;

	case CS_IOCB_ERROR:
		ql_dbg(ql_dbg_async, vha, 0xf0a0,
		    "IOCB request is failed, comp_status=%x\n", comp_status);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0xf0a1,
		    "Invalid Abort IO IOCB Completion Status %x\n",
		    comp_status);
		break;
	}
}
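
/*
 * Drop the reference that qla_nvme_abort_work() left outstanding while
 * waiting for the ABTS completion.
 */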
inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;
	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}