#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

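/*
 * Register an FC port discovered via PRLI as an FC-NVMe remote port with
 * the nvme-fc transport, based on the NVMe service parameters the port
 * advertised.
 */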
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = 0;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		    "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		    "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

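/*
 * nvme-fc "create_queue" callback: map a transport hardware-queue index to
 * a driver queue pair, creating a new qpair when one does not already exist
 * for that index.
 */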
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

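/*
 * Final kref release for an FC-NVMe FCP command: report the completion
 * status to the transport and return the SRB to its queue pair.
 */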
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

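/*
 * Final kref release for an FC-NVMe LS request: complete the request back
 * to the transport and free the SRB.
 */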
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

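/* LS request completion: defer the final kref_put to a work item. */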
static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

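/*
 * Worker that issues the firmware abort for an outstanding NVMe SRB and
 * drops the reference taken when the abort was queued.
 */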
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started && fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

out:
	kref_put(&sp->cmd_kref, sp->put_fn);
}

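/* nvme-fc "ls_abort" callback: schedule an abort for an outstanding LS request. */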
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

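/* nvme-fc "ls_req" callback: build and issue an NVMe LS pass-through IOCB. */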
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

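/* nvme-fc "fcp_abort" callback: schedule an abort for an outstanding FCP command. */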
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

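/*
 * Build a Command Type NVME IOCB (plus any continuation IOCBs needed for
 * extra data segments) on the queue pair's request ring and notify the
 * firmware. Called with no locks held; takes the qpair lock internally.
 */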
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t rval = QLA_SUCCESS;

	req = qpair->req;
	tot_dsds = fd->sg_cnt;

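	/* Acquire qpair specific lock */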
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

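	/* Build command packet. */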
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	cmd_pkt->entry_type = COMMAND_NVME;

	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
		     NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			     sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

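	/* Set NPORT-ID */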
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

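	/* Load data segments. */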
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		if (avail_dsds == 0) {
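			/*
			 * The Command Type NVME IOCB is out of DSD slots;
			 * take the next request-ring entry as a Continuation
			 * Type 1 IOCB and keep filling DSDs from there.
			 */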
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

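	/* Set total entry count; ensure writes are visible before advancing the ring. */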
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

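/*
 * nvme-fc "fcp_io" callback: allocate an SRB for the FCP request and queue
 * it to the firmware via qla2x00_start_nvme_mq().
 */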
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv)
		return -ENODEV;

	fcport = qla_rport->fcport;

	if (!qpair || !fcport)
		return -ENODEV;

	if (!qpair->fw_started || fcport->deleted)
		return -EBUSY;

	vha = fcport->vha;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    (qpair && !qpair->fw_started) || fcport->deleted)
		return -EBUSY;

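	/*
	 * While the remote port is resetting, return -EBUSY so the request
	 * is requeued by the transport rather than failed outright.
	 */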
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.max_hw_queues = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

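/*
 * Unregister an FC-NVMe remote port and wait for the transport to finish
 * deleting it.
 */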
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

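/* Tear down the FC-NVMe local port for this host, if one was registered. */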
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

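/*
 * Register this host as an FC-NVMe local port with the nvme-fc transport.
 * Requires at least three request queues so queue pairs remain available
 * for NVMe hardware queues.
 */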
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	if (ha->max_req_queues < 3) {
		if (!ha->flags.max_req_queue_warned)
			ql_log(ql_log_info, vha, 0x2120,
			    "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
			    __func__, ha->max_req_queues);
		ha->flags.max_req_queue_warned = 1;
		return ret;
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	/* Zero pinfo so fields not set below are not passed as garbage. */
	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}