// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6#include <linux/module.h>
7#include <linux/parser.h>
8#include <uapi/scsi/fc/fc_fs.h>
9#include <uapi/scsi/fc/fc_els.h>
10#include <linux/delay.h>
11#include <linux/overflow.h>
12
13#include "nvme.h"
14#include "fabrics.h"
15#include <linux/nvme-fc-driver.h>
16#include <linux/nvme-fc.h>
17#include <scsi/scsi_transport_fc.h>
18

/* *************************** Data Structures/Defines ****************** */

22enum nvme_fc_queue_flags {
23 NVME_FC_Q_CONNECTED = 0,
24 NVME_FC_Q_LIVE,
25};
26
27#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60
28
29struct nvme_fc_queue {
30 struct nvme_fc_ctrl *ctrl;
31 struct device *dev;
32 struct blk_mq_hw_ctx *hctx;
33 void *lldd_handle;
34 size_t cmnd_capsule_len;
35 u32 qnum;
36 u32 rqcnt;
37 u32 seqno;
38
39 u64 connection_id;
40 atomic_t csn;
41
42 unsigned long flags;
43} __aligned(sizeof(u64));
44
45enum nvme_fcop_flags {
46 FCOP_FLAGS_TERMIO = (1 << 0),
47 FCOP_FLAGS_AEN = (1 << 1),
48};
49
50struct nvmefc_ls_req_op {
51 struct nvmefc_ls_req ls_req;
52
53 struct nvme_fc_rport *rport;
54 struct nvme_fc_queue *queue;
55 struct request *rq;
56 u32 flags;
57
58 int ls_error;
59 struct completion ls_done;
60 struct list_head lsreq_list;
61 bool req_queued;
62};
63
64enum nvme_fcpop_state {
65 FCPOP_STATE_UNINIT = 0,
66 FCPOP_STATE_IDLE = 1,
67 FCPOP_STATE_ACTIVE = 2,
68 FCPOP_STATE_ABORTED = 3,
69 FCPOP_STATE_COMPLETE = 4,
70};
71
72struct nvme_fc_fcp_op {
73 struct nvme_request nreq;
74
	/*
	 * nvme_request must be the first element of the per-request pdu:
	 * the nvme core's nvme_req() helper treats the start of the
	 * blk-mq pdu as a struct nvme_request.
	 */
81 struct nvmefc_fcp_req fcp_req;
82
83 struct nvme_fc_ctrl *ctrl;
84 struct nvme_fc_queue *queue;
85 struct request *rq;
86
87 atomic_t state;
88 u32 flags;
89 u32 rqno;
90 u32 nents;
91
92 struct nvme_fc_cmd_iu cmd_iu;
93 struct nvme_fc_ersp_iu rsp_iu;
94};
95
96struct nvme_fcp_op_w_sgl {
97 struct nvme_fc_fcp_op op;
98 struct scatterlist sgl[NVME_INLINE_SG_CNT];
	uint8_t priv[];
100};
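
/*
 * Layout note: each blk-mq request pdu is a struct nvme_fcp_op_w_sgl.
 * The inline scatterlist lets small transfers avoid a separate sg
 * allocation, and the trailing flexible 'priv' area holds the LLDD's
 * per-request private data: the tag set cmd_size is computed with
 * struct_size() and the LLDD's fcprqst_priv_sz, and
 * nvme_fc_init_request() points fcp_req.private at this area.
 */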
101
102struct nvme_fc_lport {
103 struct nvme_fc_local_port localport;
104
105 struct ida endp_cnt;
106 struct list_head port_list;
107 struct list_head endp_list;
108 struct device *dev;
109 struct nvme_fc_port_template *ops;
110 struct kref ref;
111 atomic_t act_rport_cnt;
112} __aligned(sizeof(u64));
113
114struct nvme_fc_rport {
115 struct nvme_fc_remote_port remoteport;
116
117 struct list_head endp_list;
118 struct list_head ctrl_list;
119 struct list_head ls_req_list;
120 struct list_head disc_list;
121 struct device *dev;
122 struct nvme_fc_lport *lport;
123 spinlock_t lock;
124 struct kref ref;
125 atomic_t act_ctrl_cnt;
126 unsigned long dev_loss_end;
127} __aligned(sizeof(u64));
128
129enum nvme_fcctrl_flags {
130 FCCTRL_TERMIO = (1 << 0),
131};
132
133struct nvme_fc_ctrl {
134 spinlock_t lock;
135 struct nvme_fc_queue *queues;
136 struct device *dev;
137 struct nvme_fc_lport *lport;
138 struct nvme_fc_rport *rport;
139 u32 cnum;
140
141 bool ioq_live;
142 bool assoc_active;
143 atomic_t err_work_active;
144 u64 association_id;
145
146 struct list_head ctrl_list;
147
148 struct blk_mq_tag_set admin_tag_set;
149 struct blk_mq_tag_set tag_set;
150
151 struct delayed_work connect_work;
152 struct work_struct err_work;
153
154 struct kref ref;
155 u32 flags;
156 u32 iocnt;
157 wait_queue_head_t ioabort_wait;
158
159 struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
160
161 struct nvme_ctrl ctrl;
162};
163
164static inline struct nvme_fc_ctrl *
165to_fc_ctrl(struct nvme_ctrl *ctrl)
166{
167 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
168}
169
170static inline struct nvme_fc_lport *
171localport_to_lport(struct nvme_fc_local_port *portptr)
172{
173 return container_of(portptr, struct nvme_fc_lport, localport);
174}
175
176static inline struct nvme_fc_rport *
177remoteport_to_rport(struct nvme_fc_remote_port *portptr)
178{
179 return container_of(portptr, struct nvme_fc_rport, remoteport);
180}
181
182static inline struct nvmefc_ls_req_op *
183ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
184{
185 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
186}
187
188static inline struct nvme_fc_fcp_op *
189fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
190{
191 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
192}
193

/* *********************** FC-NVME Port Management ************************ */

199static DEFINE_SPINLOCK(nvme_fc_lock);
200
201static LIST_HEAD(nvme_fc_lport_list);
202static DEFINE_IDA(nvme_fc_local_port_cnt);
203static DEFINE_IDA(nvme_fc_ctrl_cnt);
204
205static struct workqueue_struct *nvme_fc_wq;
206
207static bool nvme_fc_waiting_to_unload;
208static DECLARE_COMPLETION(nvme_fc_unload_proceed);
209
/*
 * fc_udev_device is the pseudo device used as the source of the
 * FC_EVENT=nvmediscovery udev events raised by
 * nvme_fc_signal_discovery_scan().
 */
214static struct device *fc_udev_device;
215
216
217
218
219static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
220 struct nvme_fc_queue *, unsigned int);
221
222static void
223nvme_fc_free_lport(struct kref *ref)
224{
225 struct nvme_fc_lport *lport =
226 container_of(ref, struct nvme_fc_lport, ref);
227 unsigned long flags;
228
229 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
230 WARN_ON(!list_empty(&lport->endp_list));
231
232
233 spin_lock_irqsave(&nvme_fc_lock, flags);
234 list_del(&lport->port_list);
235 if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
236 complete(&nvme_fc_unload_proceed);
237 spin_unlock_irqrestore(&nvme_fc_lock, flags);
238
239 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
240 ida_destroy(&lport->endp_cnt);
241
242 put_device(lport->dev);
243
244 kfree(lport);
245}
246
247static void
248nvme_fc_lport_put(struct nvme_fc_lport *lport)
249{
250 kref_put(&lport->ref, nvme_fc_free_lport);
251}
252
253static int
254nvme_fc_lport_get(struct nvme_fc_lport *lport)
255{
256 return kref_get_unless_zero(&lport->ref);
257}
258
259
260static struct nvme_fc_lport *
261nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
262 struct nvme_fc_port_template *ops,
263 struct device *dev)
264{
265 struct nvme_fc_lport *lport;
266 unsigned long flags;
267
268 spin_lock_irqsave(&nvme_fc_lock, flags);
269
270 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
271 if (lport->localport.node_name != pinfo->node_name ||
272 lport->localport.port_name != pinfo->port_name)
273 continue;
274
275 if (lport->dev != dev) {
276 lport = ERR_PTR(-EXDEV);
277 goto out_done;
278 }
279
280 if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
281 lport = ERR_PTR(-EEXIST);
282 goto out_done;
283 }
284
285 if (!nvme_fc_lport_get(lport)) {
			/*
			 * The lport's ref count already hit zero, i.e. it is
			 * in the middle of being freed.  Behave as if no
			 * matching lport was found.
			 */
290 lport = NULL;
291 goto out_done;
292 }
293
		/* resume the previously deleted lport */
296 lport->ops = ops;
297 lport->localport.port_role = pinfo->port_role;
298 lport->localport.port_id = pinfo->port_id;
299 lport->localport.port_state = FC_OBJSTATE_ONLINE;
300
301 spin_unlock_irqrestore(&nvme_fc_lock, flags);
302
303 return lport;
304 }
305
306 lport = NULL;
307
308out_done:
309 spin_unlock_irqrestore(&nvme_fc_lock, flags);
310
311 return lport;
312}
313
/**
 * nvme_fc_register_localport - transport entry point called by an LLDD
 *                              to register the existence of a NVME host
 *                              FC port.
 * @pinfo:    information about the port to be registered
 * @template: LLDD entrypoints and operational parameters for the port
 * @dev:      physical hardware device node the port corresponds to; used
 *            for DMA mappings
 * @portptr:  on success, set to the newly allocated (or resumed)
 *            nvme_fc_local_port; on failure, set to NULL
 *
 * Returns:
 * 0 on success, or a negative errno (e.g. -EINVAL for an incomplete
 * template, -ENOMEM, -ENOSPC) on failure.
 */
331int
332nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
333 struct nvme_fc_port_template *template,
334 struct device *dev,
335 struct nvme_fc_local_port **portptr)
336{
337 struct nvme_fc_lport *newrec;
338 unsigned long flags;
339 int ret, idx;
340
341 if (!template->localport_delete || !template->remoteport_delete ||
342 !template->ls_req || !template->fcp_io ||
343 !template->ls_abort || !template->fcp_abort ||
344 !template->max_hw_queues || !template->max_sgl_segments ||
345 !template->max_dif_sgl_segments || !template->dma_boundary) {
346 ret = -EINVAL;
347 goto out_reghost_failed;
348 }
349
	/*
	 * Look for a local port with the same WWNN/WWPN that was
	 * previously deleted by the LLDD but is still waiting for its
	 * final reference to drop.  If found, resume it instead of
	 * allocating a new one.
	 */
357 newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
358
359
360 if (IS_ERR(newrec)) {
361 ret = PTR_ERR(newrec);
362 goto out_reghost_failed;
363
364
365 } else if (newrec) {
366 *portptr = &newrec->localport;
367 return 0;
368 }
369
370
371
372 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
373 GFP_KERNEL);
374 if (!newrec) {
375 ret = -ENOMEM;
376 goto out_reghost_failed;
377 }
378
379 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
380 if (idx < 0) {
381 ret = -ENOSPC;
382 goto out_fail_kfree;
383 }
384
385 if (!get_device(dev) && dev) {
386 ret = -ENODEV;
387 goto out_ida_put;
388 }
389
390 INIT_LIST_HEAD(&newrec->port_list);
391 INIT_LIST_HEAD(&newrec->endp_list);
392 kref_init(&newrec->ref);
393 atomic_set(&newrec->act_rport_cnt, 0);
394 newrec->ops = template;
395 newrec->dev = dev;
396 ida_init(&newrec->endp_cnt);
397 newrec->localport.private = &newrec[1];
398 newrec->localport.node_name = pinfo->node_name;
399 newrec->localport.port_name = pinfo->port_name;
400 newrec->localport.port_role = pinfo->port_role;
401 newrec->localport.port_id = pinfo->port_id;
402 newrec->localport.port_state = FC_OBJSTATE_ONLINE;
403 newrec->localport.port_num = idx;
404
405 spin_lock_irqsave(&nvme_fc_lock, flags);
406 list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
407 spin_unlock_irqrestore(&nvme_fc_lock, flags);
408
409 if (dev)
410 dma_set_seg_boundary(dev, template->dma_boundary);
411
412 *portptr = &newrec->localport;
413 return 0;
414
415out_ida_put:
416 ida_simple_remove(&nvme_fc_local_port_cnt, idx);
417out_fail_kfree:
418 kfree(newrec);
419out_reghost_failed:
420 *portptr = NULL;
421
422 return ret;
423}
424EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
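
/*
 * Illustrative (not prescriptive) LLDD usage; the template and device
 * variables below are hypothetical:
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *	};
 *	struct nvme_fc_local_port *localport;
 *	int err = nvme_fc_register_localport(&pinfo, &my_fc_nvme_template,
 *					     &pdev->dev, &localport);
 *
 * The template must provide every callback and limit checked at the top
 * of nvme_fc_register_localport(), otherwise registration fails with
 * -EINVAL.
 */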
425
/**
 * nvme_fc_unregister_localport - transport entry point called by an LLDD
 *                                to remove a previously registered NVME
 *                                host FC port.
 * @portptr: pointer to the (registered) local port that is to be removed
 *
 * Returns:
 * 0 on success, or a negative errno (e.g. -EINVAL) on failure.
 */
436int
437nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
438{
439 struct nvme_fc_lport *lport = localport_to_lport(portptr);
440 unsigned long flags;
441
442 if (!portptr)
443 return -EINVAL;
444
445 spin_lock_irqsave(&nvme_fc_lock, flags);
446
447 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
448 spin_unlock_irqrestore(&nvme_fc_lock, flags);
449 return -EINVAL;
450 }
451 portptr->port_state = FC_OBJSTATE_DELETED;
452
453 spin_unlock_irqrestore(&nvme_fc_lock, flags);
454
455 if (atomic_read(&lport->act_rport_cnt) == 0)
456 lport->ops->localport_delete(&lport->localport);
457
458 nvme_fc_lport_put(lport);
459
460 return 0;
461}
462EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
463
/*
 * FC-NVME transport addresses have the fixed format
 * "nn-0x<16 hex digits>:pn-0x<16 hex digits>".  FCNVME_TRADDR_LENGTH
 * bounds the "NVMEFC_HOST_TRADDR="/"NVMEFC_TRADDR=" strings built for
 * the discovery udev event below.
 */
472#define FCNVME_TRADDR_LENGTH 64
473
474static void
475nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
476 struct nvme_fc_rport *rport)
477{
478 char hostaddr[FCNVME_TRADDR_LENGTH];
479 char tgtaddr[FCNVME_TRADDR_LENGTH];
480 char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };
481
482 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
483 return;
484
485 snprintf(hostaddr, sizeof(hostaddr),
486 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
487 lport->localport.node_name, lport->localport.port_name);
488 snprintf(tgtaddr, sizeof(tgtaddr),
489 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
490 rport->remoteport.node_name, rport->remoteport.port_name);
491 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
492}
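
/*
 * The nvmediscovery uevent carries NVMEFC_HOST_TRADDR/NVMEFC_TRADDR in
 * "nn-0x...:pn-0x..." form.  Userspace (typically nvme-cli's
 * auto-connect udev rule/service) is expected to turn this into a
 * fabrics connect to the discovery controller; the kernel takes no
 * further action here.
 */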
493
494static void
495nvme_fc_free_rport(struct kref *ref)
496{
497 struct nvme_fc_rport *rport =
498 container_of(ref, struct nvme_fc_rport, ref);
499 struct nvme_fc_lport *lport =
500 localport_to_lport(rport->remoteport.localport);
501 unsigned long flags;
502
503 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
504 WARN_ON(!list_empty(&rport->ctrl_list));
505
506
507 spin_lock_irqsave(&nvme_fc_lock, flags);
508 list_del(&rport->endp_list);
509 spin_unlock_irqrestore(&nvme_fc_lock, flags);
510
511 WARN_ON(!list_empty(&rport->disc_list));
512 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
513
514 kfree(rport);
515
516 nvme_fc_lport_put(lport);
517}
518
519static void
520nvme_fc_rport_put(struct nvme_fc_rport *rport)
521{
522 kref_put(&rport->ref, nvme_fc_free_rport);
523}
524
525static int
526nvme_fc_rport_get(struct nvme_fc_rport *rport)
527{
528 return kref_get_unless_zero(&rport->ref);
529}
530
531static void
532nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
533{
534 switch (ctrl->ctrl.state) {
535 case NVME_CTRL_NEW:
536 case NVME_CTRL_CONNECTING:
537
538
539
540
541 dev_info(ctrl->ctrl.device,
542 "NVME-FC{%d}: connectivity re-established. "
543 "Attempting reconnect\n", ctrl->cnum);
544
545 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
546 break;
547
548 case NVME_CTRL_RESETTING:
549
550
551
552
553
554 break;
555
556 default:
557
558 break;
559 }
560}
561
562static struct nvme_fc_rport *
563nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
564 struct nvme_fc_port_info *pinfo)
565{
566 struct nvme_fc_rport *rport;
567 struct nvme_fc_ctrl *ctrl;
568 unsigned long flags;
569
570 spin_lock_irqsave(&nvme_fc_lock, flags);
571
572 list_for_each_entry(rport, &lport->endp_list, endp_list) {
573 if (rport->remoteport.node_name != pinfo->node_name ||
574 rport->remoteport.port_name != pinfo->port_name)
575 continue;
576
577 if (!nvme_fc_rport_get(rport)) {
578 rport = ERR_PTR(-ENOLCK);
579 goto out_done;
580 }
581
582 spin_unlock_irqrestore(&nvme_fc_lock, flags);
583
584 spin_lock_irqsave(&rport->lock, flags);
585
586
587 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
588
589 spin_unlock_irqrestore(&rport->lock, flags);
590 nvme_fc_rport_put(rport);
591 return ERR_PTR(-ESTALE);
592 }
593
594 rport->remoteport.port_role = pinfo->port_role;
595 rport->remoteport.port_id = pinfo->port_id;
596 rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
597 rport->dev_loss_end = 0;
598
599
600
601
602
603 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
604 nvme_fc_resume_controller(ctrl);
605
606 spin_unlock_irqrestore(&rport->lock, flags);
607
608 return rport;
609 }
610
611 rport = NULL;
612
613out_done:
614 spin_unlock_irqrestore(&nvme_fc_lock, flags);
615
616 return rport;
617}
618
619static inline void
620__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
621 struct nvme_fc_port_info *pinfo)
622{
623 if (pinfo->dev_loss_tmo)
624 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
625 else
626 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
627}
628
/**
 * nvme_fc_register_remoteport - transport entry point called by an LLDD
 *                               to register the existence of a NVME
 *                               subsystem FC port on its fabric.
 * @localport: local NVME host FC port the remote port was discovered on
 * @pinfo:     information about the discovered remote port
 * @portptr:   on success, set to the newly allocated (or resumed)
 *             nvme_fc_remote_port; on failure, set to NULL
 *
 * Returns:
 * 0 on success, or a negative errno on failure.
 */
645int
646nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
647 struct nvme_fc_port_info *pinfo,
648 struct nvme_fc_remote_port **portptr)
649{
650 struct nvme_fc_lport *lport = localport_to_lport(localport);
651 struct nvme_fc_rport *newrec;
652 unsigned long flags;
653 int ret, idx;
654
655 if (!nvme_fc_lport_get(lport)) {
656 ret = -ESHUTDOWN;
657 goto out_reghost_failed;
658 }
659
	/*
	 * Look for a remote port with the same WWNN/WWPN that was
	 * previously deleted (connectivity lost) and, if found, resume it
	 * instead of allocating a new one.
	 */
665 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
666
667
668 if (IS_ERR(newrec)) {
669 ret = PTR_ERR(newrec);
670 goto out_lport_put;
671
672
673 } else if (newrec) {
674 nvme_fc_lport_put(lport);
675 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
676 nvme_fc_signal_discovery_scan(lport, newrec);
677 *portptr = &newrec->remoteport;
678 return 0;
679 }
680
681
682
683 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
684 GFP_KERNEL);
685 if (!newrec) {
686 ret = -ENOMEM;
687 goto out_lport_put;
688 }
689
690 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
691 if (idx < 0) {
692 ret = -ENOSPC;
693 goto out_kfree_rport;
694 }
695
696 INIT_LIST_HEAD(&newrec->endp_list);
697 INIT_LIST_HEAD(&newrec->ctrl_list);
698 INIT_LIST_HEAD(&newrec->ls_req_list);
699 INIT_LIST_HEAD(&newrec->disc_list);
700 kref_init(&newrec->ref);
701 atomic_set(&newrec->act_ctrl_cnt, 0);
702 spin_lock_init(&newrec->lock);
703 newrec->remoteport.localport = &lport->localport;
704 newrec->dev = lport->dev;
705 newrec->lport = lport;
706 newrec->remoteport.private = &newrec[1];
707 newrec->remoteport.port_role = pinfo->port_role;
708 newrec->remoteport.node_name = pinfo->node_name;
709 newrec->remoteport.port_name = pinfo->port_name;
710 newrec->remoteport.port_id = pinfo->port_id;
711 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
712 newrec->remoteport.port_num = idx;
713 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
714
715 spin_lock_irqsave(&nvme_fc_lock, flags);
716 list_add_tail(&newrec->endp_list, &lport->endp_list);
717 spin_unlock_irqrestore(&nvme_fc_lock, flags);
718
719 nvme_fc_signal_discovery_scan(lport, newrec);
720
721 *portptr = &newrec->remoteport;
722 return 0;
723
724out_kfree_rport:
725 kfree(newrec);
726out_lport_put:
727 nvme_fc_lport_put(lport);
728out_reghost_failed:
729 *portptr = NULL;
730 return ret;
731}
732EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
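
/*
 * Lifecycle note: if an LLDD re-registers a remote port whose WWNN/WWPN
 * matches one that was unregistered but is still within dev_loss_tmo,
 * the existing nvme_fc_rport is resumed (see
 * nvme_fc_attach_to_suspended_rport()) and controllers waiting on
 * connectivity are kicked to reconnect rather than being recreated.
 */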
733
734static int
735nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
736{
737 struct nvmefc_ls_req_op *lsop;
738 unsigned long flags;
739
740restart:
741 spin_lock_irqsave(&rport->lock, flags);
742
743 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
744 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
745 lsop->flags |= FCOP_FLAGS_TERMIO;
746 spin_unlock_irqrestore(&rport->lock, flags);
747 rport->lport->ops->ls_abort(&rport->lport->localport,
748 &rport->remoteport,
749 &lsop->ls_req);
750 goto restart;
751 }
752 }
753 spin_unlock_irqrestore(&rport->lock, flags);
754
755 return 0;
756}
757
758static void
759nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
760{
761 dev_info(ctrl->ctrl.device,
762 "NVME-FC{%d}: controller connectivity lost. Awaiting "
763 "Reconnect", ctrl->cnum);
764
765 switch (ctrl->ctrl.state) {
766 case NVME_CTRL_NEW:
767 case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset.  The reset will terminate
		 * the association and then sit in CONNECTING, retrying
		 * until connectivity returns or the loss timers expire.
		 * If the reset cannot even be scheduled, give up and
		 * delete the controller.
		 */
775 if (nvme_reset_ctrl(&ctrl->ctrl)) {
776 dev_warn(ctrl->ctrl.device,
777 "NVME-FC{%d}: Couldn't schedule reset.\n",
778 ctrl->cnum);
779 nvme_delete_ctrl(&ctrl->ctrl);
780 }
781 break;
782
783 case NVME_CTRL_CONNECTING:
784
785
786
787
788
789
790
791 break;
792
793 case NVME_CTRL_RESETTING:
794
795
796
797
798
799
800 break;
801
802 case NVME_CTRL_DELETING:
803 default:
804
805 break;
806 }
807}
808
/**
 * nvme_fc_unregister_remoteport - transport entry point called by an LLDD
 *                                 to remove a previously registered NVME
 *                                 subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be removed
 *
 * Controllers attached to the remote port are either deleted immediately
 * (dev_loss_tmo of 0) or left awaiting reconnect until connectivity
 * returns or their loss timers expire.
 *
 * Returns:
 * 0 on success, or a negative errno on failure.
 */
820int
821nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
822{
823 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
824 struct nvme_fc_ctrl *ctrl;
825 unsigned long flags;
826
827 if (!portptr)
828 return -EINVAL;
829
830 spin_lock_irqsave(&rport->lock, flags);
831
832 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
833 spin_unlock_irqrestore(&rport->lock, flags);
834 return -EINVAL;
835 }
836 portptr->port_state = FC_OBJSTATE_DELETED;
837
838 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);
839
840 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
841
842 if (!portptr->dev_loss_tmo) {
843 dev_warn(ctrl->ctrl.device,
844 "NVME-FC{%d}: controller connectivity lost.\n",
845 ctrl->cnum);
846 nvme_delete_ctrl(&ctrl->ctrl);
847 } else
848 nvme_fc_ctrl_connectivity_loss(ctrl);
849 }
850
851 spin_unlock_irqrestore(&rport->lock, flags);
852
853 nvme_fc_abort_lsops(rport);
854
855 if (atomic_read(&rport->act_ctrl_cnt) == 0)
856 rport->lport->ops->remoteport_delete(portptr);
857
858
859
860
861
862
863 nvme_fc_rport_put(rport);
864
865 return 0;
866}
867EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
868
/**
 * nvme_fc_rescan_remoteport - transport entry point called by an LLDD
 *                             to request a rescan (nvmediscovery uevent)
 *                             of an NVME subsystem FC port.
 * @remoteport: pointer to the (registered) remote port to be rescanned
 */
877void
878nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
879{
880 struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
881
882 nvme_fc_signal_discovery_scan(rport->lport, rport);
883}
884EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
885
886int
887nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
888 u32 dev_loss_tmo)
889{
890 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
891 unsigned long flags;
892
893 spin_lock_irqsave(&rport->lock, flags);
894
895 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
896 spin_unlock_irqrestore(&rport->lock, flags);
897 return -EINVAL;
898 }
899
900
901 rport->remoteport.dev_loss_tmo = dev_loss_tmo;
902
903 spin_unlock_irqrestore(&rport->lock, flags);
904
905 return 0;
906}
907EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
908

/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fc_dma_*() wrappers below tolerate LLDDs that do not supply a
 * DMA-capable struct device: when dev is NULL the operations degrade to
 * no-ops (fc_map_sg() simply mirrors the scatterlist lengths), so the
 * transport can still hand buffers to such drivers.
 */

928static inline dma_addr_t
929fc_dma_map_single(struct device *dev, void *ptr, size_t size,
930 enum dma_data_direction dir)
931{
932 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
933}
934
935static inline int
936fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
937{
938 return dev ? dma_mapping_error(dev, dma_addr) : 0;
939}
940
941static inline void
942fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
943 enum dma_data_direction dir)
944{
945 if (dev)
946 dma_unmap_single(dev, addr, size, dir);
947}
948
949static inline void
950fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
951 enum dma_data_direction dir)
952{
953 if (dev)
954 dma_sync_single_for_cpu(dev, addr, size, dir);
955}
956
957static inline void
958fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
959 enum dma_data_direction dir)
960{
961 if (dev)
962 dma_sync_single_for_device(dev, addr, size, dir);
963}
964
965
966static int
967fc_map_sg(struct scatterlist *sg, int nents)
968{
969 struct scatterlist *s;
970 int i;
971
972 WARN_ON(nents == 0 || sg[0].length == 0);
973
974 for_each_sg(sg, s, nents, i) {
975 s->dma_address = 0L;
976#ifdef CONFIG_NEED_SG_DMA_LENGTH
977 s->dma_length = s->length;
978#endif
979 }
980 return nents;
981}
982
983static inline int
984fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
985 enum dma_data_direction dir)
986{
987 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
988}
989
990static inline void
991fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
992 enum dma_data_direction dir)
993{
994 if (dev)
995 dma_unmap_sg(dev, sg, nents, dir);
996}
997
998
999
1000static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
1001static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
1002
1003
1004static void
1005__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
1006{
1007 struct nvme_fc_rport *rport = lsop->rport;
1008 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1009 unsigned long flags;
1010
1011 spin_lock_irqsave(&rport->lock, flags);
1012
1013 if (!lsop->req_queued) {
1014 spin_unlock_irqrestore(&rport->lock, flags);
1015 return;
1016 }
1017
1018 list_del(&lsop->lsreq_list);
1019
1020 lsop->req_queued = false;
1021
1022 spin_unlock_irqrestore(&rport->lock, flags);
1023
1024 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1025 (lsreq->rqstlen + lsreq->rsplen),
1026 DMA_BIDIRECTIONAL);
1027
1028 nvme_fc_rport_put(rport);
1029}
1030
1031static int
1032__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
1033 struct nvmefc_ls_req_op *lsop,
1034 void (*done)(struct nvmefc_ls_req *req, int status))
1035{
1036 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1037 unsigned long flags;
1038 int ret = 0;
1039
1040 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1041 return -ECONNREFUSED;
1042
1043 if (!nvme_fc_rport_get(rport))
1044 return -ESHUTDOWN;
1045
1046 lsreq->done = done;
1047 lsop->rport = rport;
1048 lsop->req_queued = false;
1049 INIT_LIST_HEAD(&lsop->lsreq_list);
1050 init_completion(&lsop->ls_done);
1051
1052 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
1053 lsreq->rqstlen + lsreq->rsplen,
1054 DMA_BIDIRECTIONAL);
1055 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
1056 ret = -EFAULT;
1057 goto out_putrport;
1058 }
1059 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
1060
1061 spin_lock_irqsave(&rport->lock, flags);
1062
1063 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
1064
1065 lsop->req_queued = true;
1066
1067 spin_unlock_irqrestore(&rport->lock, flags);
1068
1069 ret = rport->lport->ops->ls_req(&rport->lport->localport,
1070 &rport->remoteport, lsreq);
1071 if (ret)
1072 goto out_unlink;
1073
1074 return 0;
1075
1076out_unlink:
1077 lsop->ls_error = ret;
1078 spin_lock_irqsave(&rport->lock, flags);
1079 lsop->req_queued = false;
1080 list_del(&lsop->lsreq_list);
1081 spin_unlock_irqrestore(&rport->lock, flags);
1082 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1083 (lsreq->rqstlen + lsreq->rsplen),
1084 DMA_BIDIRECTIONAL);
1085out_putrport:
1086 nvme_fc_rport_put(rport);
1087
1088 return ret;
1089}
1090
1091static void
1092nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
1093{
1094 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1095
1096 lsop->ls_error = status;
1097 complete(&lsop->ls_done);
1098}
1099
1100static int
1101nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
1102{
1103 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1104 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
1105 int ret;
1106
1107 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
1108
1109 if (!ret) {
		/*
		 * Synchronous LS: wait for the LLDD to call
		 * nvme_fc_send_ls_req_done(), which signals ls_done and
		 * records the completion status in ls_error.
		 */
1116 wait_for_completion(&lsop->ls_done);
1117
1118 __nvme_fc_finish_ls_req(lsop);
1119
1120 ret = lsop->ls_error;
1121 }
1122
1123 if (ret)
1124 return ret;
1125
	/* ACC or RJT payload? */
1127 if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
1128 return -ENXIO;
1129
1130 return 0;
1131}
1132
1133static int
1134nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
1135 struct nvmefc_ls_req_op *lsop,
1136 void (*done)(struct nvmefc_ls_req *req, int status))
1137{
	/*
	 * Asynchronous LS: don't wait for completion.  The caller's done()
	 * callback is responsible for calling __nvme_fc_finish_ls_req()
	 * and freeing the lsop.
	 */
1140 return __nvme_fc_send_ls_req(rport, lsop, done);
1141}
1142
1143
1144enum {
1145 VERR_NO_ERROR = 0,
1146 VERR_LSACC = 1,
1147 VERR_LSDESC_RQST = 2,
1148 VERR_LSDESC_RQST_LEN = 3,
1149 VERR_ASSOC_ID = 4,
1150 VERR_ASSOC_ID_LEN = 5,
1151 VERR_CONN_ID = 6,
1152 VERR_CONN_ID_LEN = 7,
1153 VERR_CR_ASSOC = 8,
1154 VERR_CR_ASSOC_ACC_LEN = 9,
1155 VERR_CR_CONN = 10,
1156 VERR_CR_CONN_ACC_LEN = 11,
1157 VERR_DISCONN = 12,
1158 VERR_DISCONN_ACC_LEN = 13,
1159};
1160
1161static char *validation_errors[] = {
1162 "OK",
1163 "Not LS_ACC",
1164 "Not LSDESC_RQST",
1165 "Bad LSDESC_RQST Length",
1166 "Not Association ID",
1167 "Bad Association ID Length",
1168 "Not Connection ID",
1169 "Bad Connection ID Length",
1170 "Not CR_ASSOC Rqst",
1171 "Bad CR_ASSOC ACC Length",
1172 "Not CR_CONN Rqst",
1173 "Bad CR_CONN ACC Length",
1174 "Not Disconnect Rqst",
1175 "Bad Disconnect ACC Length",
1176};
1177
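/*
 * Build and send the FC-NVME Create Association LS for the admin queue.
 * The request carries the ERSP ratio, the (zero-based) sqsize, the host
 * NQN/ID and the target subsystem NQN; a validated LS_ACC returns the
 * association_id for the controller and the connection_id used in the
 * admin queue's CMD IUs.
 */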
1178static int
1179nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
1180 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
1181{
1182 struct nvmefc_ls_req_op *lsop;
1183 struct nvmefc_ls_req *lsreq;
1184 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
1185 struct fcnvme_ls_cr_assoc_acc *assoc_acc;
1186 int ret, fcret = 0;
1187
1188 lsop = kzalloc((sizeof(*lsop) +
1189 ctrl->lport->ops->lsrqst_priv_sz +
1190 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
1191 if (!lsop) {
1192 ret = -ENOMEM;
1193 goto out_no_memory;
1194 }
1195 lsreq = &lsop->ls_req;
1196
1197 lsreq->private = (void *)&lsop[1];
1198 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
1199 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1200 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
1201
1202 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
1203 assoc_rqst->desc_list_len =
1204 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1205
1206 assoc_rqst->assoc_cmd.desc_tag =
1207 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
1208 assoc_rqst->assoc_cmd.desc_len =
1209 fcnvme_lsdesc_len(
1210 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1211
1212 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1213 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
1214
1215 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
1216 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
1217 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
1218 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
1219 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
1220 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
1221
1222 lsop->queue = queue;
1223 lsreq->rqstaddr = assoc_rqst;
1224 lsreq->rqstlen = sizeof(*assoc_rqst);
1225 lsreq->rspaddr = assoc_acc;
1226 lsreq->rsplen = sizeof(*assoc_acc);
1227 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
1228
1229 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1230 if (ret)
1231 goto out_free_buffer;
1232
	/* process connect LS completion: validate the CR_ASSOC ACC payload */
1236 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1237 fcret = VERR_LSACC;
1238 else if (assoc_acc->hdr.desc_list_len !=
1239 fcnvme_lsdesc_len(
1240 sizeof(struct fcnvme_ls_cr_assoc_acc)))
1241 fcret = VERR_CR_ASSOC_ACC_LEN;
1242 else if (assoc_acc->hdr.rqst.desc_tag !=
1243 cpu_to_be32(FCNVME_LSDESC_RQST))
1244 fcret = VERR_LSDESC_RQST;
1245 else if (assoc_acc->hdr.rqst.desc_len !=
1246 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1247 fcret = VERR_LSDESC_RQST_LEN;
1248 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
1249 fcret = VERR_CR_ASSOC;
1250 else if (assoc_acc->associd.desc_tag !=
1251 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1252 fcret = VERR_ASSOC_ID;
1253 else if (assoc_acc->associd.desc_len !=
1254 fcnvme_lsdesc_len(
1255 sizeof(struct fcnvme_lsdesc_assoc_id)))
1256 fcret = VERR_ASSOC_ID_LEN;
1257 else if (assoc_acc->connectid.desc_tag !=
1258 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1259 fcret = VERR_CONN_ID;
1260 else if (assoc_acc->connectid.desc_len !=
1261 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1262 fcret = VERR_CONN_ID_LEN;
1263
1264 if (fcret) {
1265 ret = -EBADF;
1266 dev_err(ctrl->dev,
1267 "q %d Create Association LS failed: %s\n",
1268 queue->qnum, validation_errors[fcret]);
1269 } else {
1270 ctrl->association_id =
1271 be64_to_cpu(assoc_acc->associd.association_id);
1272 queue->connection_id =
1273 be64_to_cpu(assoc_acc->connectid.connection_id);
1274 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1275 }
1276
1277out_free_buffer:
1278 kfree(lsop);
1279out_no_memory:
1280 if (ret)
1281 dev_err(ctrl->dev,
1282 "queue %d connect admin queue failed (%d).\n",
1283 queue->qnum, ret);
1284 return ret;
1285}
1286
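/*
 * Build and send the FC-NVME Create Connection LS for one io queue of an
 * existing association.  A validated LS_ACC returns the connection_id
 * that is placed in each CMD IU sent on that queue.
 */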
1287static int
1288nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1289 u16 qsize, u16 ersp_ratio)
1290{
1291 struct nvmefc_ls_req_op *lsop;
1292 struct nvmefc_ls_req *lsreq;
1293 struct fcnvme_ls_cr_conn_rqst *conn_rqst;
1294 struct fcnvme_ls_cr_conn_acc *conn_acc;
1295 int ret, fcret = 0;
1296
1297 lsop = kzalloc((sizeof(*lsop) +
1298 ctrl->lport->ops->lsrqst_priv_sz +
1299 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
1300 if (!lsop) {
1301 ret = -ENOMEM;
1302 goto out_no_memory;
1303 }
1304 lsreq = &lsop->ls_req;
1305
1306 lsreq->private = (void *)&lsop[1];
1307 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
1308 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1309 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
1310
1311 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
1312 conn_rqst->desc_list_len = cpu_to_be32(
1313 sizeof(struct fcnvme_lsdesc_assoc_id) +
1314 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1315
1316 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1317 conn_rqst->associd.desc_len =
1318 fcnvme_lsdesc_len(
1319 sizeof(struct fcnvme_lsdesc_assoc_id));
1320 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1321 conn_rqst->connect_cmd.desc_tag =
1322 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
1323 conn_rqst->connect_cmd.desc_len =
1324 fcnvme_lsdesc_len(
1325 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1326 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1327 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
1328 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
1329
1330 lsop->queue = queue;
1331 lsreq->rqstaddr = conn_rqst;
1332 lsreq->rqstlen = sizeof(*conn_rqst);
1333 lsreq->rspaddr = conn_acc;
1334 lsreq->rsplen = sizeof(*conn_acc);
1335 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
1336
1337 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1338 if (ret)
1339 goto out_free_buffer;
1340
	/* process connect LS completion: validate the CR_CONN ACC payload */
1344 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1345 fcret = VERR_LSACC;
1346 else if (conn_acc->hdr.desc_list_len !=
1347 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1348 fcret = VERR_CR_CONN_ACC_LEN;
1349 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1350 fcret = VERR_LSDESC_RQST;
1351 else if (conn_acc->hdr.rqst.desc_len !=
1352 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1353 fcret = VERR_LSDESC_RQST_LEN;
1354 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1355 fcret = VERR_CR_CONN;
1356 else if (conn_acc->connectid.desc_tag !=
1357 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1358 fcret = VERR_CONN_ID;
1359 else if (conn_acc->connectid.desc_len !=
1360 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1361 fcret = VERR_CONN_ID_LEN;
1362
1363 if (fcret) {
1364 ret = -EBADF;
1365 dev_err(ctrl->dev,
1366 "q %d Create I/O Connection LS failed: %s\n",
1367 queue->qnum, validation_errors[fcret]);
1368 } else {
1369 queue->connection_id =
1370 be64_to_cpu(conn_acc->connectid.connection_id);
1371 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1372 }
1373
1374out_free_buffer:
1375 kfree(lsop);
1376out_no_memory:
1377 if (ret)
1378 dev_err(ctrl->dev,
1379 "queue %d connect I/O queue failed (%d).\n",
1380 queue->qnum, ret);
1381 return ret;
1382}
1383
1384static void
1385nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1386{
1387 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1388
1389 __nvme_fc_finish_ls_req(lsop);
1390
	/* fc-nvme initiator doesn't care about success or failure of cmd */
1393 kfree(lsop);
1394}
1395
/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association.  The LS is issued
 * asynchronously, fire-and-forget: any error, including failure to
 * allocate the request, is simply ignored, as the association is being
 * torn down regardless.
 */
1413static void
1414nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1415{
1416 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
1417 struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
1418 struct nvmefc_ls_req_op *lsop;
1419 struct nvmefc_ls_req *lsreq;
1420 int ret;
1421
1422 lsop = kzalloc((sizeof(*lsop) +
1423 ctrl->lport->ops->lsrqst_priv_sz +
1424 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1425 GFP_KERNEL);
1426 if (!lsop)
1427
1428 return;
1429
1430 lsreq = &lsop->ls_req;
1431
1432 lsreq->private = (void *)&lsop[1];
1433 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)
1434 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1435 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
1436
1437 discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
1438 discon_rqst->desc_list_len = cpu_to_be32(
1439 sizeof(struct fcnvme_lsdesc_assoc_id) +
1440 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1441
1442 discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1443 discon_rqst->associd.desc_len =
1444 fcnvme_lsdesc_len(
1445 sizeof(struct fcnvme_lsdesc_assoc_id));
1446
1447 discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1448
1449 discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1450 FCNVME_LSDESC_DISCONN_CMD);
1451 discon_rqst->discon_cmd.desc_len =
1452 fcnvme_lsdesc_len(
1453 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1454
1455 lsreq->rqstaddr = discon_rqst;
1456 lsreq->rqstlen = sizeof(*discon_rqst);
1457 lsreq->rspaddr = discon_acc;
1458 lsreq->rsplen = sizeof(*discon_acc);
1459 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
1460
1461 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1462 nvme_fc_disconnect_assoc_done);
1463 if (ret)
1464 kfree(lsop);
1465}
1466

/* *********************** NVME Ctrl Routines **************************** */

1470static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1471
1472static void
1473__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1474 struct nvme_fc_fcp_op *op)
1475{
1476 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1477 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1478 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1479 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1480
1481 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1482}
1483
1484static void
1485nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1486 unsigned int hctx_idx)
1487{
1488 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1489
1490 return __nvme_fc_exit_request(set->driver_data, op);
1491}
1492
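/*
 * Abort helper: atomically move the op from ACTIVE to ABORTED and ask the
 * LLDD to abort the exchange.  While the controller is in TERMIO
 * (association teardown), ctrl->iocnt counts aborts in flight so the
 * teardown path can wait on ioabort_wait until every one has completed
 * (see __nvme_fc_fcpop_chk_teardowns()).
 */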
1493static int
1494__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1495{
1496 unsigned long flags;
1497 int opstate;
1498
1499 spin_lock_irqsave(&ctrl->lock, flags);
1500 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1501 if (opstate != FCPOP_STATE_ACTIVE)
1502 atomic_set(&op->state, opstate);
1503 else if (ctrl->flags & FCCTRL_TERMIO)
1504 ctrl->iocnt++;
1505 spin_unlock_irqrestore(&ctrl->lock, flags);
1506
1507 if (opstate != FCPOP_STATE_ACTIVE)
1508 return -ECANCELED;
1509
1510 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1511 &ctrl->rport->remoteport,
1512 op->queue->lldd_handle,
1513 &op->fcp_req);
1514
1515 return 0;
1516}
1517
1518static void
1519nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1520{
1521 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1522 int i;
1523
1524
1525 if (!(aen_op->flags & FCOP_FLAGS_AEN))
1526 return;
1527
1528 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1529 __nvme_fc_abort_op(ctrl, aen_op);
1530}
1531
1532static inline void
1533__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1534 struct nvme_fc_fcp_op *op, int opstate)
1535{
1536 unsigned long flags;
1537
1538 if (opstate == FCPOP_STATE_ABORTED) {
1539 spin_lock_irqsave(&ctrl->lock, flags);
1540 if (ctrl->flags & FCCTRL_TERMIO) {
1541 if (!--ctrl->iocnt)
1542 wake_up(&ctrl->ioabort_wait);
1543 }
1544 spin_unlock_irqrestore(&ctrl->lock, flags);
1545 }
1546}
1547
1548static void
1549nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1550{
1551 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1552 struct request *rq = op->rq;
1553 struct nvmefc_fcp_req *freq = &op->fcp_req;
1554 struct nvme_fc_ctrl *ctrl = op->ctrl;
1555 struct nvme_fc_queue *queue = op->queue;
1556 struct nvme_completion *cqe = &op->rsp_iu.cqe;
1557 struct nvme_command *sqe = &op->cmd_iu.sqe;
1558 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1559 union nvme_result result;
1560 bool terminate_assoc = true;
1561 int opstate;
1562
	/*
	 * The LLDD has completed the FCP operation.  Per FC-NVME, the
	 * response can take one of three forms:
	 *  - no response payload (rcv_rsplen of 0, or the defined
	 *    all-zeroes RSP length): success is implied, provided the
	 *    full expected data length was transferred;
	 *  - a full Extended Response (ERSP) IU carrying the NVMe CQE:
	 *    its length, transferred-byte count and command id must be
	 *    consistent with the request;
	 *  - anything else is malformed.
	 * An abort, an LLDD error, or any validation failure is treated
	 * as a transport error: the io is completed with
	 * NVME_SC_HOST_PATH_ERROR and the association is terminated via
	 * nvme_fc_error_recovery().
	 */
1600 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
1601
1602 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1603 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1604
1605 if (opstate == FCPOP_STATE_ABORTED)
1606 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1607 else if (freq->status) {
1608 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1609 dev_info(ctrl->ctrl.device,
1610 "NVME-FC{%d}: io failed due to lldd error %d\n",
1611 ctrl->cnum, freq->status);
1612 }
1613
	/*
	 * An abort or LLDD error overrides whatever response payload may
	 * have arrived; complete the io with the path error status.
	 */
1619 if (status)
1620 goto done;
1621
1622
1623
1624
1625
1626
1627
1628
1629 switch (freq->rcv_rsplen) {
1630
1631 case 0:
1632 case NVME_FC_SIZEOF_ZEROS_RSP:
1633
1634
1635
1636
1637
1638 if (freq->transferred_length !=
1639 be32_to_cpu(op->cmd_iu.data_len)) {
1640 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1641 dev_info(ctrl->ctrl.device,
1642 "NVME-FC{%d}: io failed due to bad transfer "
1643 "length: %d vs expected %d\n",
1644 ctrl->cnum, freq->transferred_length,
1645 be32_to_cpu(op->cmd_iu.data_len));
1646 goto done;
1647 }
1648 result.u64 = 0;
1649 break;
1650
1651 case sizeof(struct nvme_fc_ersp_iu):
1652
1653
1654
1655
1656 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1657 (freq->rcv_rsplen / 4) ||
1658 be32_to_cpu(op->rsp_iu.xfrd_len) !=
1659 freq->transferred_length ||
1660 op->rsp_iu.ersp_result ||
1661 sqe->common.command_id != cqe->command_id)) {
1662 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1663 dev_info(ctrl->ctrl.device,
1664 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
1665 "iu len %d, xfr len %d vs %d, status code "
1666 "%d, cmdid %d vs %d\n",
1667 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
1668 be32_to_cpu(op->rsp_iu.xfrd_len),
1669 freq->transferred_length,
1670 op->rsp_iu.ersp_result,
1671 sqe->common.command_id,
1672 cqe->command_id);
1673 goto done;
1674 }
1675 result = cqe->result;
1676 status = cqe->status;
1677 break;
1678
1679 default:
1680 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1681 dev_info(ctrl->ctrl.device,
1682 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
1683 "len %d\n",
1684 ctrl->cnum, freq->rcv_rsplen);
1685 goto done;
1686 }
1687
1688 terminate_assoc = false;
1689
1690done:
1691 if (op->flags & FCOP_FLAGS_AEN) {
1692 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1693 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1694 atomic_set(&op->state, FCPOP_STATE_IDLE);
1695 op->flags = FCOP_FLAGS_AEN;
1696 nvme_fc_ctrl_put(ctrl);
1697 goto check_error;
1698 }
1699
1700 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1701 nvme_end_request(rq, status, result);
1702
1703check_error:
1704 if (terminate_assoc)
1705 nvme_fc_error_recovery(ctrl, "transport detected io error");
1706}
1707
1708static int
1709__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1710 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1711 struct request *rq, u32 rqno)
1712{
1713 struct nvme_fcp_op_w_sgl *op_w_sgl =
1714 container_of(op, typeof(*op_w_sgl), op);
1715 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1716 int ret = 0;
1717
1718 memset(op, 0, sizeof(*op));
1719 op->fcp_req.cmdaddr = &op->cmd_iu;
1720 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1721 op->fcp_req.rspaddr = &op->rsp_iu;
1722 op->fcp_req.rsplen = sizeof(op->rsp_iu);
1723 op->fcp_req.done = nvme_fc_fcpio_done;
1724 op->ctrl = ctrl;
1725 op->queue = queue;
1726 op->rq = rq;
1727 op->rqno = rqno;
1728
1729 cmdiu->format_id = NVME_CMD_FORMAT_ID;
1730 cmdiu->fc_id = NVME_CMD_FC_ID;
1731 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1732 if (queue->qnum)
1733 cmdiu->rsv_cat = fccmnd_set_cat_css(0,
1734 (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
1735 else
1736 cmdiu->rsv_cat = fccmnd_set_cat_admin(0);
1737
1738 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1739 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1740 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1741 dev_err(ctrl->dev,
1742 "FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
1744 goto out_on_error;
1745 }
1746
1747 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1748 &op->rsp_iu, sizeof(op->rsp_iu),
1749 DMA_FROM_DEVICE);
1750 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1751 dev_err(ctrl->dev,
1752 "FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
1754 }
1755
1756 atomic_set(&op->state, FCPOP_STATE_IDLE);
1757out_on_error:
1758 return ret;
1759}
1760
1761static int
1762nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1763 unsigned int hctx_idx, unsigned int numa_node)
1764{
1765 struct nvme_fc_ctrl *ctrl = set->driver_data;
1766 struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
1767 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
1768 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
1769 int res;
1770
1771 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
1772 if (res)
1773 return res;
1774 op->op.fcp_req.first_sgl = &op->sgl[0];
1775 op->op.fcp_req.private = &op->priv[0];
1776 nvme_req(rq)->ctrl = &ctrl->ctrl;
1777 return res;
1778}
1779
1780static int
1781nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1782{
1783 struct nvme_fc_fcp_op *aen_op;
1784 struct nvme_fc_cmd_iu *cmdiu;
1785 struct nvme_command *sqe;
1786 void *private;
1787 int i, ret;
1788
1789 aen_op = ctrl->aen_ops;
1790 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1791 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1792 GFP_KERNEL);
1793 if (!private)
1794 return -ENOMEM;
1795
1796 cmdiu = &aen_op->cmd_iu;
1797 sqe = &cmdiu->sqe;
1798 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1799 aen_op, (struct request *)NULL,
1800 (NVME_AQ_BLK_MQ_DEPTH + i));
1801 if (ret) {
1802 kfree(private);
1803 return ret;
1804 }
1805
1806 aen_op->flags = FCOP_FLAGS_AEN;
1807 aen_op->fcp_req.private = private;
1808
1809 memset(sqe, 0, sizeof(*sqe));
1810 sqe->common.opcode = nvme_admin_async_event;
1811
1812 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
1813 }
1814 return 0;
1815}
1816
1817static void
1818nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1819{
1820 struct nvme_fc_fcp_op *aen_op;
1821 int i;
1822
1823 aen_op = ctrl->aen_ops;
1824 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1825 if (!aen_op->fcp_req.private)
1826 continue;
1827
1828 __nvme_fc_exit_request(ctrl, aen_op);
1829
1830 kfree(aen_op->fcp_req.private);
1831 aen_op->fcp_req.private = NULL;
1832 }
1833}
1834
1835static inline void
1836__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1837 unsigned int qidx)
1838{
1839 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1840
1841 hctx->driver_data = queue;
1842 queue->hctx = hctx;
1843}
1844
1845static int
1846nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1847 unsigned int hctx_idx)
1848{
1849 struct nvme_fc_ctrl *ctrl = data;
1850
1851 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1852
1853 return 0;
1854}
1855
1856static int
1857nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1858 unsigned int hctx_idx)
1859{
1860 struct nvme_fc_ctrl *ctrl = data;
1861
1862 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1863
1864 return 0;
1865}
1866
1867static void
1868nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
1869{
1870 struct nvme_fc_queue *queue;
1871
1872 queue = &ctrl->queues[idx];
1873 memset(queue, 0, sizeof(*queue));
1874 queue->ctrl = ctrl;
1875 queue->qnum = idx;
1876 atomic_set(&queue->csn, 0);
1877 queue->dev = ctrl->dev;
1878
1879 if (idx > 0)
1880 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1881 else
1882 queue->cmnd_capsule_len = sizeof(struct nvme_command);
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894}
1895
/*
 * This routine terminates a queue at the transport level.  The transport
 * has already ensured that all outstanding ios on the queue have been
 * terminated.
 */
1904static void
1905nvme_fc_free_queue(struct nvme_fc_queue *queue)
1906{
1907 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1908 return;
1909
1910 clear_bit(NVME_FC_Q_LIVE, &queue->flags);
1911
1912
1913
1914
1915
1916
1917 queue->connection_id = 0;
1918 atomic_set(&queue->csn, 0);
1919}
1920
1921static void
1922__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1923 struct nvme_fc_queue *queue, unsigned int qidx)
1924{
1925 if (ctrl->lport->ops->delete_queue)
1926 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1927 queue->lldd_handle);
1928 queue->lldd_handle = NULL;
1929}
1930
1931static void
1932nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1933{
1934 int i;
1935
1936 for (i = 1; i < ctrl->ctrl.queue_count; i++)
1937 nvme_fc_free_queue(&ctrl->queues[i]);
1938}
1939
1940static int
1941__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1942 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1943{
1944 int ret = 0;
1945
1946 queue->lldd_handle = NULL;
1947 if (ctrl->lport->ops->create_queue)
1948 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1949 qidx, qsize, &queue->lldd_handle);
1950
1951 return ret;
1952}
1953
1954static void
1955nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1956{
1957 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
1958 int i;
1959
1960 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
1961 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1962}
1963
1964static int
1965nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1966{
1967 struct nvme_fc_queue *queue = &ctrl->queues[1];
1968 int i, ret;
1969
1970 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
1971 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1972 if (ret)
1973 goto delete_queues;
1974 }
1975
1976 return 0;
1977
1978delete_queues:
1979 for (; i >= 0; i--)
1980 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1981 return ret;
1982}
1983
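/*
 * For each io queue: send the Create Connection LS, then issue the NVMe
 * fabrics Connect command on that queue, and only then mark the queue
 * LIVE so nvme_fc_queue_rq() accepts regular io on it.
 */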
1984static int
1985nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1986{
1987 int i, ret = 0;
1988
1989 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
1990 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1991 (qsize / 5));
1992 if (ret)
1993 break;
1994 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
1995 if (ret)
1996 break;
1997
1998 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
1999 }
2000
2001 return ret;
2002}
2003
2004static void
2005nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2006{
2007 int i;
2008
2009 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2010 nvme_fc_init_queue(ctrl, i);
2011}
2012
2013static void
2014nvme_fc_ctrl_free(struct kref *ref)
2015{
2016 struct nvme_fc_ctrl *ctrl =
2017 container_of(ref, struct nvme_fc_ctrl, ref);
2018 unsigned long flags;
2019
2020 if (ctrl->ctrl.tagset) {
2021 blk_cleanup_queue(ctrl->ctrl.connect_q);
2022 blk_mq_free_tag_set(&ctrl->tag_set);
2023 }
2024
2025
2026 spin_lock_irqsave(&ctrl->rport->lock, flags);
2027 list_del(&ctrl->ctrl_list);
2028 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2029
2030 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2031 blk_cleanup_queue(ctrl->ctrl.admin_q);
2032 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
2033 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2034
2035 kfree(ctrl->queues);
2036
2037 put_device(ctrl->dev);
2038 nvme_fc_rport_put(ctrl->rport);
2039
2040 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2041 if (ctrl->ctrl.opts)
2042 nvmf_free_options(ctrl->ctrl.opts);
2043 kfree(ctrl);
2044}
2045
2046static void
2047nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2048{
2049 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2050}
2051
2052static int
2053nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2054{
2055 return kref_get_unless_zero(&ctrl->ref);
2056}
2057
/*
 * All accesses from the nvme core layer are done; drop the reference held
 * on its behalf so the controller can eventually be freed.
 */
2062static void
2063nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2064{
2065 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2066
2067 WARN_ON(nctrl != &ctrl->ctrl);
2068
2069 nvme_fc_ctrl_put(ctrl);
2070}
2071
2072static void
2073nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2074{
2075 int active;
2076
	/*
	 * An error during (re)connect is an error on creating the new
	 * association; kick the dedicated error-recovery work (at most
	 * once) so the partially created association is torn down.
	 * Several ios may race through this path before things are
	 * cleaned up.
	 */
2084 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2085 active = atomic_xchg(&ctrl->err_work_active, 1);
2086 if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
2087 atomic_set(&ctrl->err_work_active, 0);
2088 WARN_ON(1);
2089 }
2090 return;
2091 }
2092
2093
2094 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2095 return;
2096
2097 dev_warn(ctrl->ctrl.device,
2098 "NVME-FC{%d}: transport association error detected: %s\n",
2099 ctrl->cnum, errmsg);
2100 dev_warn(ctrl->ctrl.device,
2101 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2102
2103 nvme_reset_ctrl(&ctrl->ctrl);
2104}
2105
2106static enum blk_eh_timer_return
2107nvme_fc_timeout(struct request *rq, bool reserved)
2108{
2109 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2110 struct nvme_fc_ctrl *ctrl = op->ctrl;
2111
	/*
	 * we can't individually ABTS an io without affecting the queue,
	 * thus killing the queue, and thus the association.
	 * So resolve the timeout by triggering error recovery, which
	 * resets the controller: the association (and with it every
	 * outstanding io) is terminated and then reconnected.
	 */
2119 nvme_fc_error_recovery(ctrl, "io timeout error");
2120
	/*
	 * The recovery path will abort and complete this io shortly; just
	 * restart the timer in the meantime.
	 */
2126 return BLK_EH_RESET_TIMER;
2127}
2128
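/*
 * Map the request's data for the LLDD: the chained sg table starts in the
 * inline SGL embedded in struct nvme_fcp_op_w_sgl (NVME_INLINE_SG_CNT
 * entries) and is DMA-mapped against the local port's device.
 * nvme_fc_unmap_data() undoes both steps at completion time.
 */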
2129static int
2130nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2131 struct nvme_fc_fcp_op *op)
2132{
2133 struct nvmefc_fcp_req *freq = &op->fcp_req;
2134 int ret;
2135
2136 freq->sg_cnt = 0;
2137
2138 if (!blk_rq_nr_phys_segments(rq))
2139 return 0;
2140
2141 freq->sg_table.sgl = freq->first_sgl;
2142 ret = sg_alloc_table_chained(&freq->sg_table,
2143 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
2144 NVME_INLINE_SG_CNT);
2145 if (ret)
2146 return -ENOMEM;
2147
2148 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2149 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2150 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2151 op->nents, rq_dma_dir(rq));
2152 if (unlikely(freq->sg_cnt <= 0)) {
2153 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2154 freq->sg_cnt = 0;
2155 return -EFAULT;
2156 }
2157
2158
2159
2160
2161 return 0;
2162}
2163
2164static void
2165nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2166 struct nvme_fc_fcp_op *op)
2167{
2168 struct nvmefc_fcp_req *freq = &op->fcp_req;
2169
2170 if (!freq->sg_cnt)
2171 return;
2172
2173 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2174 rq_dma_dir(rq));
2175
2176 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2177
2178 freq->sg_cnt = 0;
2179}
2180
/*
 * Routine that actually issues an FCP command (regular io, admin, or AEN)
 * to the LLDD.  It finishes the FC-NVME CMD IU around the already built
 * SQE, maps the data buffer (unless this is an AEN), assigns the
 * per-queue command sequence number, and hands the request to the LLDD
 * via fcp_io().
 *
 * Returns BLK_STS_OK on success; BLK_STS_RESOURCE when the command should
 * be retried later (connectivity lost, LLDD busy); BLK_STS_IOERR on a
 * hard failure.
 */
2204static blk_status_t
2205nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2206 struct nvme_fc_fcp_op *op, u32 data_len,
2207 enum nvmefc_fcp_datadir io_dir)
2208{
2209 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2210 struct nvme_command *sqe = &cmdiu->sqe;
2211 int ret, opstate;
2212
2213
2214
2215
2216
2217 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2218 return BLK_STS_RESOURCE;
2219
2220 if (!nvme_fc_ctrl_get(ctrl))
2221 return BLK_STS_IOERR;
2222
2223
2224 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2225 cmdiu->data_len = cpu_to_be32(data_len);
2226 switch (io_dir) {
2227 case NVMEFC_FCP_WRITE:
2228 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2229 break;
2230 case NVMEFC_FCP_READ:
2231 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2232 break;
2233 case NVMEFC_FCP_NODATA:
2234 cmdiu->flags = 0;
2235 break;
2236 }
2237 op->fcp_req.payload_length = data_len;
2238 op->fcp_req.io_dir = io_dir;
2239 op->fcp_req.transferred_length = 0;
2240 op->fcp_req.rcv_rsplen = 0;
2241 op->fcp_req.status = NVME_SC_SUCCESS;
2242 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2243
2244
2245
2246
2247
2248 WARN_ON_ONCE(sqe->common.metadata);
2249 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2250
	/*
	 * NVME over FC does not use real SGL descriptors in the SQE: the
	 * data is moved by the FC exchange itself.  Advertise a single
	 * Transport SGL Data Block descriptor covering the payload.
	 */
2258 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2259 NVME_SGL_FMT_TRANSPORT_A;
2260 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2261 sqe->rw.dptr.sgl.addr = 0;
2262
2263 if (!(op->flags & FCOP_FLAGS_AEN)) {
2264 ret = nvme_fc_map_data(ctrl, op->rq, op);
2265 if (ret < 0) {
2266 nvme_cleanup_cmd(op->rq);
2267 nvme_fc_ctrl_put(ctrl);
2268 if (ret == -ENOMEM || ret == -EAGAIN)
2269 return BLK_STS_RESOURCE;
2270 return BLK_STS_IOERR;
2271 }
2272 }
2273
2274 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2275 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2276
2277 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2278
2279 if (!(op->flags & FCOP_FLAGS_AEN))
2280 blk_mq_start_request(op->rq);
2281
2282 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
2283 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2284 &ctrl->rport->remoteport,
2285 queue->lldd_handle, &op->fcp_req);
2286
2287 if (ret) {
		/*
		 * The LLDD rejected the command: undo the ACTIVE state and
		 * any TERMIO accounting, release the data mapping and the
		 * controller reference, then either fail the io outright
		 * (remote port still online and a hard LLDD error) or ask
		 * blk-mq to retry it later (LLDD returned -EBUSY, or
		 * connectivity was lost).
		 */
2300 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2301 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2302
2303 if (!(op->flags & FCOP_FLAGS_AEN))
2304 nvme_fc_unmap_data(ctrl, op->rq, op);
2305
2306 nvme_cleanup_cmd(op->rq);
2307 nvme_fc_ctrl_put(ctrl);
2308
2309 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2310 ret != -EBUSY)
2311 return BLK_STS_IOERR;
2312
2313 return BLK_STS_RESOURCE;
2314 }
2315
2316 return BLK_STS_OK;
2317}
2318
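/*
 * blk-mq ->queue_rq entry point: fail or requeue early if the remote port
 * or queue is not ready, let the core build the SQE via nvme_setup_cmd(),
 * derive the data length and direction, and hand the command to
 * nvme_fc_start_fcp_op().
 */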
2319static blk_status_t
2320nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2321 const struct blk_mq_queue_data *bd)
2322{
2323 struct nvme_ns *ns = hctx->queue->queuedata;
2324 struct nvme_fc_queue *queue = hctx->driver_data;
2325 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2326 struct request *rq = bd->rq;
2327 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2328 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2329 struct nvme_command *sqe = &cmdiu->sqe;
2330 enum nvmefc_fcp_datadir io_dir;
2331 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2332 u32 data_len;
2333 blk_status_t ret;
2334
2335 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2336 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2337 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2338
2339 ret = nvme_setup_cmd(ns, rq, sqe);
2340 if (ret)
2341 return ret;
2342
	/*
	 * An io may have no data segments (e.g. flush).  Only compute the
	 * payload length and direction when there is data to move;
	 * otherwise issue the command as NODATA.
	 */
2351 if (blk_rq_nr_phys_segments(rq)) {
2352 data_len = blk_rq_payload_bytes(rq);
2353 io_dir = ((rq_data_dir(rq) == WRITE) ?
2354 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2355 } else {
2356 data_len = 0;
2357 io_dir = NVMEFC_FCP_NODATA;
2358 }
2359
2360
2361 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2362}
2363
2364static void
2365nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2366{
2367 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2368 struct nvme_fc_fcp_op *aen_op;
2369 unsigned long flags;
2370 bool terminating = false;
2371 blk_status_t ret;
2372
2373 spin_lock_irqsave(&ctrl->lock, flags);
2374 if (ctrl->flags & FCCTRL_TERMIO)
2375 terminating = true;
2376 spin_unlock_irqrestore(&ctrl->lock, flags);
2377
2378 if (terminating)
2379 return;
2380
2381 aen_op = &ctrl->aen_ops[0];
2382
2383 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2384 NVMEFC_FCP_NODATA);
2385 if (ret)
2386 dev_err(ctrl->ctrl.device,
2387 "failed async event work\n");
2388}
2389
2390static void
2391nvme_fc_complete_rq(struct request *rq)
2392{
2393 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2394 struct nvme_fc_ctrl *ctrl = op->ctrl;
2395
2396 atomic_set(&op->state, FCPOP_STATE_IDLE);
2397
2398 nvme_fc_unmap_data(ctrl, rq, op);
2399 nvme_complete_rq(rq);
2400 nvme_fc_ctrl_put(ctrl);
2401}
2402
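/*
 * Called per outstanding request from blk_mq_tagset_busy_iter() while an
 * association is being torn down: aborts the FC exchange for the request
 * so the LLDD completes it back through the normal done path with an
 * aborted status.
 */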
2416static bool
2417nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2418{
2419 struct nvme_ctrl *nctrl = data;
2420 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2421 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2422
2423 __nvme_fc_abort_op(ctrl, op);
2424 return true;
2425}
2426
2427
2428static const struct blk_mq_ops nvme_fc_mq_ops = {
2429 .queue_rq = nvme_fc_queue_rq,
2430 .complete = nvme_fc_complete_rq,
2431 .init_request = nvme_fc_init_request,
2432 .exit_request = nvme_fc_exit_request,
2433 .init_hctx = nvme_fc_init_hctx,
2434 .timeout = nvme_fc_timeout,
2435};
2436
2437static int
2438nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2439{
2440 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2441 unsigned int nr_io_queues;
2442 int ret;
2443
2444 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2445 ctrl->lport->ops->max_hw_queues);
2446 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2447 if (ret) {
2448 dev_info(ctrl->ctrl.device,
2449 "set_queue_count failed: %d\n", ret);
2450 return ret;
2451 }
2452
2453 ctrl->ctrl.queue_count = nr_io_queues + 1;
2454 if (!nr_io_queues)
2455 return 0;
2456
2457 nvme_fc_init_io_queues(ctrl);
2458
2459 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2460 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2461 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2462 ctrl->tag_set.reserved_tags = 1;
2463 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
2464 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2465 ctrl->tag_set.cmd_size =
2466 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
2467 ctrl->lport->ops->fcprqst_priv_sz);
2468 ctrl->tag_set.driver_data = ctrl;
2469 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2470 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2471
2472 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2473 if (ret)
2474 return ret;
2475
2476 ctrl->ctrl.tagset = &ctrl->tag_set;
2477
2478 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2479 if (IS_ERR(ctrl->ctrl.connect_q)) {
2480 ret = PTR_ERR(ctrl->ctrl.connect_q);
2481 goto out_free_tag_set;
2482 }
2483
2484 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2485 if (ret)
2486 goto out_cleanup_blk_queue;
2487
2488 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2489 if (ret)
2490 goto out_delete_hw_queues;
2491
2492 ctrl->ioq_live = true;
2493
2494 return 0;
2495
2496out_delete_hw_queues:
2497 nvme_fc_delete_hw_io_queues(ctrl);
2498out_cleanup_blk_queue:
2499 blk_cleanup_queue(ctrl->ctrl.connect_q);
2500out_free_tag_set:
2501 blk_mq_free_tag_set(&ctrl->tag_set);
2502 nvme_fc_free_io_queues(ctrl);
2503
2504
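	/* clear the tagset so later teardown paths skip the io queues */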
2505 ctrl->ctrl.tagset = NULL;
2506
2507 return ret;
2508}
2509
2510static int
2511nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2512{
2513 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2514 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2515 unsigned int nr_io_queues;
2516 int ret;
2517
2518 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2519 ctrl->lport->ops->max_hw_queues);
2520 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2521 if (ret) {
2522 dev_info(ctrl->ctrl.device,
2523 "set_queue_count failed: %d\n", ret);
2524 return ret;
2525 }
2526
2527 if (!nr_io_queues && prior_ioq_cnt) {
2528 dev_info(ctrl->ctrl.device,
2529 "Fail Reconnect: At least 1 io queue "
2530 "required (was %d)\n", prior_ioq_cnt);
2531 return -ENOSPC;
2532 }
2533
2534 ctrl->ctrl.queue_count = nr_io_queues + 1;
2535
2536 if (ctrl->ctrl.queue_count == 1)
2537 return 0;
2538
2539 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2540 if (ret)
2541 goto out_free_io_queues;
2542
2543 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2544 if (ret)
2545 goto out_delete_hw_queues;
2546
2547 if (prior_ioq_cnt != nr_io_queues)
2548 dev_info(ctrl->ctrl.device,
2549 "reconnect: revising io queue count from %d to %d\n",
2550 prior_ioq_cnt, nr_io_queues);
2551 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2552
2553 return 0;
2554
2555out_delete_hw_queues:
2556 nvme_fc_delete_hw_io_queues(ctrl);
2557out_free_io_queues:
2558 nvme_fc_free_io_queues(ctrl);
2559 return ret;
2560}
2561
2562static void
2563nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2564{
2565 struct nvme_fc_lport *lport = rport->lport;
2566
2567 atomic_inc(&lport->act_rport_cnt);
2568}
2569
2570static void
2571nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2572{
2573 struct nvme_fc_lport *lport = rport->lport;
2574 u32 cnt;
2575
2576 cnt = atomic_dec_return(&lport->act_rport_cnt);
2577 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2578 lport->ops->localport_delete(&lport->localport);
2579}
2580
2581static int
2582nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
2583{
2584 struct nvme_fc_rport *rport = ctrl->rport;
2585 u32 cnt;
2586
2587 if (ctrl->assoc_active)
2588 return 1;
2589
2590 ctrl->assoc_active = true;
2591 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
2592 if (cnt == 1)
2593 nvme_fc_rport_active_on_lport(rport);
2594
2595 return 0;
2596}
2597
2598static int
2599nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
2600{
2601 struct nvme_fc_rport *rport = ctrl->rport;
2602 struct nvme_fc_lport *lport = rport->lport;
2603 u32 cnt;
2604
2605
2606
2607 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
2608 if (cnt == 0) {
2609 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
2610 lport->ops->remoteport_delete(&rport->remoteport);
2611 nvme_fc_rport_inactive_on_lport(rport);
2612 }
2613
2614 return 0;
2615}
2616
2617
2618
2619
2620
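/*
 * Create (or re-create) the association with the target: bring up and
 * connect the admin queue, run core controller initialization, then
 * create or re-establish the io queues.
 */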
2621static int
2622nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2623{
2624 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2625 int ret;
2626 bool changed;
2627
2628 ++ctrl->ctrl.nr_reconnects;
2629
2630 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2631 return -ENODEV;
2632
2633 if (nvme_fc_ctlr_active_on_rport(ctrl))
2634 return -ENOTUNIQ;
2635
2636 dev_info(ctrl->ctrl.device,
2637 "NVME-FC{%d}: create association : host wwpn 0x%016llx "
2638 " rport wwpn 0x%016llx: NQN \"%s\"\n",
2639 ctrl->cnum, ctrl->lport->localport.port_name,
2640 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
2641
2642
2643
2644
2645
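	/* create and connect the admin queue */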
2646 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2647 NVME_AQ_DEPTH);
2648 if (ret)
2649 goto out_free_queue;
2650
2651 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2652 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
2653 if (ret)
2654 goto out_delete_hw_queue;
2655
2656 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2657 if (ret)
2658 goto out_disconnect_admin_queue;
2659
2660 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2661
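	/*
	 * The admin queue is live; enable the controller and run the
	 * core initialization (identify) over it.
	 */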
2669 ret = nvme_enable_ctrl(&ctrl->ctrl);
2670 if (ret)
2671 goto out_disconnect_admin_queue;
2672
2673 ctrl->ctrl.max_hw_sectors =
2674 (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
2675
2676 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2677
2678 ret = nvme_init_identify(&ctrl->ctrl);
2679 if (ret)
2680 goto out_disconnect_admin_queue;
2681
2682
2683
2684
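	/*
	 * FC-NVME carries no data in the command capsule, so a non-zero
	 * icdoff cannot be supported.
	 */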
2685 if (ctrl->ctrl.icdoff) {
2686 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2687 ctrl->ctrl.icdoff);
2688 goto out_disconnect_admin_queue;
2689 }
2690
2691
2692
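	/* clamp queue_size to the limits the controller reported */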
2693 if (opts->queue_size > ctrl->ctrl.maxcmd) {
2694
2695 dev_warn(ctrl->ctrl.device,
2696 "queue_size %zu > ctrl maxcmd %u, reducing "
2697 "to maxcmd\n",
2698 opts->queue_size, ctrl->ctrl.maxcmd);
2699 opts->queue_size = ctrl->ctrl.maxcmd;
2700 }
2701
2702 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
2703
2704 dev_warn(ctrl->ctrl.device,
2705 "queue_size %zu > ctrl sqsize %u, reducing "
2706 "to sqsize\n",
2707 opts->queue_size, ctrl->ctrl.sqsize + 1);
2708 opts->queue_size = ctrl->ctrl.sqsize + 1;
2709 }
2710
2711 ret = nvme_fc_init_aen_ops(ctrl);
2712 if (ret)
2713 goto out_term_aen_ops;
2714
2715
2716
2717
2718
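	/* create the io queues, or re-establish them on a reconnect */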
2719 if (ctrl->ctrl.queue_count > 1) {
2720 if (!ctrl->ioq_live)
2721 ret = nvme_fc_create_io_queues(ctrl);
2722 else
2723 ret = nvme_fc_recreate_io_queues(ctrl);
2724 if (ret)
2725 goto out_term_aen_ops;
2726 }
2727
2728 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2729
2730 ctrl->ctrl.nr_reconnects = 0;
2731
2732 if (changed)
2733 nvme_start_ctrl(&ctrl->ctrl);
2734
2735 return 0;
2736
2737out_term_aen_ops:
2738 nvme_fc_term_aen_ops(ctrl);
2739out_disconnect_admin_queue:
2740
2741 nvme_fc_xmt_disconnect_assoc(ctrl);
2742 ctrl->association_id = 0;
2743out_delete_hw_queue:
2744 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2745out_free_queue:
2746 nvme_fc_free_queue(&ctrl->queues[0]);
2747 ctrl->assoc_active = false;
2748 nvme_fc_ctlr_inactive_on_rport(ctrl);
2749
2750 return ret;
2751}
2752
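/*
 * Tear down the association: terminate all outstanding admin and io
 * exchanges, send a Disconnect LS to the target (if an association was
 * established), and release the queues. Blocks until aborted io completes.
 */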
2759static void
2760nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2761{
2762 unsigned long flags;
2763
2764 if (!ctrl->assoc_active)
2765 return;
2766 ctrl->assoc_active = false;
2767
2768 spin_lock_irqsave(&ctrl->lock, flags);
2769 ctrl->flags |= FCCTRL_TERMIO;
2770 ctrl->iocnt = 0;
2771 spin_unlock_irqrestore(&ctrl->lock, flags);
2772
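	/*
	 * Terminate outstanding io: stop the io queues and walk their
	 * busy requests, aborting the FC exchange for each one. The
	 * LLDD then completes the aborted requests through the normal
	 * done path with an error status.
	 */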
2785 if (ctrl->ctrl.queue_count > 1) {
2786 nvme_stop_queues(&ctrl->ctrl);
2787 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2788 nvme_fc_terminate_exchange, &ctrl->ctrl);
2789 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
2790 }
2791
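	/*
	 * Do the same for the admin queue. No register-level shutdown
	 * is attempted since the state of the link is unknown and the
	 * outstanding exchanges were just aborted.
	 */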
2809 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
2810 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2811 nvme_fc_terminate_exchange, &ctrl->ctrl);
2812 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
2813
2814
2815 nvme_fc_abort_aen_ops(ctrl);
2816
2817
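	/* wait for all of the aborted io to complete */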
2818 spin_lock_irq(&ctrl->lock);
2819 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
2820 ctrl->flags &= ~FCCTRL_TERMIO;
2821 spin_unlock_irq(&ctrl->lock);
2822
2823 nvme_fc_term_aen_ops(ctrl);
2824
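	/*
	 * Send a Disconnect(association) LS to the target, but only if
	 * an association was actually established (association_id != 0).
	 */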
2831 if (ctrl->association_id)
2832 nvme_fc_xmt_disconnect_assoc(ctrl);
2833
2834 ctrl->association_id = 0;
2835
2836 if (ctrl->ctrl.tagset) {
2837 nvme_fc_delete_hw_io_queues(ctrl);
2838 nvme_fc_free_io_queues(ctrl);
2839 }
2840
2841 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2842 nvme_fc_free_queue(&ctrl->queues[0]);
2843
2844
2845 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2846
2847
2848 nvme_start_queues(&ctrl->ctrl);
2849
2850 nvme_fc_ctlr_inactive_on_rport(ctrl);
2851}
2852
2853static void
2854nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
2855{
2856 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2857
2858 cancel_work_sync(&ctrl->err_work);
2859 cancel_delayed_work_sync(&ctrl->connect_work);
2860
2861
2862
2863
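	/*
	 * Kill the association on the link side. This blocks until the
	 * aborted io has completed.
	 */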
2864 nvme_fc_delete_association(ctrl);
2865}
2866
2867static void
2868nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2869{
2870 struct nvme_fc_rport *rport = ctrl->rport;
2871 struct nvme_fc_remote_port *portptr = &rport->remoteport;
2872 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
2873 bool recon = true;
2874
2875 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
2876 return;
2877
2878 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2879 dev_info(ctrl->ctrl.device,
2880 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2881 ctrl->cnum, status);
2882 else if (time_after_eq(jiffies, rport->dev_loss_end))
2883 recon = false;
2884
2885 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
2886 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2887 dev_info(ctrl->ctrl.device,
2888 "NVME-FC{%d}: Reconnect attempt in %ld "
2889 "seconds\n",
2890 ctrl->cnum, recon_delay / HZ);
2891 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
2892 recon_delay = rport->dev_loss_end - jiffies;
2893
2894 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
2895 } else {
2896 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2897 dev_warn(ctrl->ctrl.device,
2898 "NVME-FC{%d}: Max reconnect attempts (%d) "
2899 "reached.\n",
2900 ctrl->cnum, ctrl->ctrl.nr_reconnects);
2901 else
2902 dev_warn(ctrl->ctrl.device,
2903 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
2904 "while waiting for remoteport connectivity.\n",
2905 ctrl->cnum, portptr->dev_loss_tmo);
2906 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
2907 }
2908}
2909
2910static void
2911__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
2923 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
2924 nvme_stop_keep_alive(&ctrl->ctrl);
2925
2926
2927 nvme_fc_delete_association(ctrl);
2928 }
2929
2930 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
2931 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
2932 dev_err(ctrl->ctrl.device,
2933 "NVME-FC{%d}: error_recovery: Couldn't change state "
2934 "to CONNECTING\n", ctrl->cnum);
2935}
2936
2937static void
2938nvme_fc_reset_ctrl_work(struct work_struct *work)
2939{
2940 struct nvme_fc_ctrl *ctrl =
2941 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2942 int ret;
2943
2944 __nvme_fc_terminate_io(ctrl);
2945
2946 nvme_stop_ctrl(&ctrl->ctrl);
2947
2948 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
2949 ret = nvme_fc_create_association(ctrl);
2950 else
2951 ret = -ENOTCONN;
2952
2953 if (ret)
2954 nvme_fc_reconnect_or_delete(ctrl, ret);
2955 else
2956 dev_info(ctrl->ctrl.device,
2957 "NVME-FC{%d}: controller reset complete\n",
2958 ctrl->cnum);
2959}
2960
2961static void
2962nvme_fc_connect_err_work(struct work_struct *work)
2963{
2964 struct nvme_fc_ctrl *ctrl =
2965 container_of(work, struct nvme_fc_ctrl, err_work);
2966
2967 __nvme_fc_terminate_io(ctrl);
2968
2969 atomic_set(&ctrl->err_work_active, 0);
2970
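	/*
	 * Rescheduling the connection is left to the reconnect work
	 * item, which should have stalled waiting on the io whose error
	 * scheduled this work.
	 */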
2977}
2978
2979static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2980 .name = "fc",
2981 .module = THIS_MODULE,
2982 .flags = NVME_F_FABRICS,
2983 .reg_read32 = nvmf_reg_read32,
2984 .reg_read64 = nvmf_reg_read64,
2985 .reg_write32 = nvmf_reg_write32,
2986 .free_ctrl = nvme_fc_nvme_ctrl_freed,
2987 .submit_async_event = nvme_fc_submit_async_event,
2988 .delete_ctrl = nvme_fc_delete_ctrl,
2989 .get_address = nvmf_get_address,
2990};
2991
2992static void
2993nvme_fc_connect_ctrl_work(struct work_struct *work)
2994{
2995 int ret;
2996
2997 struct nvme_fc_ctrl *ctrl =
2998 container_of(to_delayed_work(work),
2999 struct nvme_fc_ctrl, connect_work);
3000
3001 ret = nvme_fc_create_association(ctrl);
3002 if (ret)
3003 nvme_fc_reconnect_or_delete(ctrl, ret);
3004 else
3005 dev_info(ctrl->ctrl.device,
3006 "NVME-FC{%d}: controller connect complete\n",
3007 ctrl->cnum);
3008}
3009
3010
3011static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3012 .queue_rq = nvme_fc_queue_rq,
3013 .complete = nvme_fc_complete_rq,
3014 .init_request = nvme_fc_init_request,
3015 .exit_request = nvme_fc_exit_request,
3016 .init_hctx = nvme_fc_init_admin_hctx,
3017 .timeout = nvme_fc_timeout,
3018};
3019
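/*
 * Returns true if a controller already exists on this rport whose base
 * connect options (host NQN, host ID, subsystem NQN) match the new
 * request; used to reject duplicate connects unless explicitly allowed.
 */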
3029static bool
3030nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3031 struct nvmf_ctrl_options *opts)
3032{
3033 struct nvme_fc_ctrl *ctrl;
3034 unsigned long flags;
3035 bool found = false;
3036
3037 spin_lock_irqsave(&rport->lock, flags);
3038 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3039 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3040 if (found)
3041 break;
3042 }
3043 spin_unlock_irqrestore(&rport->lock, flags);
3044
3045 return found;
3046}
3047
3048static struct nvme_ctrl *
3049nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3050 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3051{
3052 struct nvme_fc_ctrl *ctrl;
3053 unsigned long flags;
3054 int ret, idx;
3055
3056 if (!(rport->remoteport.port_role &
3057 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3058 ret = -EBADR;
3059 goto out_fail;
3060 }
3061
3062 if (!opts->duplicate_connect &&
3063 nvme_fc_existing_controller(rport, opts)) {
3064 ret = -EALREADY;
3065 goto out_fail;
3066 }
3067
3068 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3069 if (!ctrl) {
3070 ret = -ENOMEM;
3071 goto out_fail;
3072 }
3073
3074 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
3075 if (idx < 0) {
3076 ret = -ENOSPC;
3077 goto out_free_ctrl;
3078 }
3079
3080 ctrl->ctrl.opts = opts;
3081 ctrl->ctrl.nr_reconnects = 0;
3082 if (lport->dev)
3083 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3084 else
3085 ctrl->ctrl.numa_node = NUMA_NO_NODE;
3086 INIT_LIST_HEAD(&ctrl->ctrl_list);
3087 ctrl->lport = lport;
3088 ctrl->rport = rport;
3089 ctrl->dev = lport->dev;
3090 ctrl->cnum = idx;
3091 ctrl->ioq_live = false;
3092 ctrl->assoc_active = false;
3093 atomic_set(&ctrl->err_work_active, 0);
3094 init_waitqueue_head(&ctrl->ioabort_wait);
3095
3096 get_device(ctrl->dev);
3097 kref_init(&ctrl->ref);
3098
3099 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3100 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3101 INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
3102 spin_lock_init(&ctrl->lock);
3103
3104
3105 ctrl->ctrl.queue_count = min_t(unsigned int,
3106 opts->nr_io_queues,
3107 lport->ops->max_hw_queues);
3108 ctrl->ctrl.queue_count++;
3109
3110 ctrl->ctrl.sqsize = opts->queue_size - 1;
3111 ctrl->ctrl.kato = opts->kato;
3112 ctrl->ctrl.cntlid = 0xffff;
3113
3114 ret = -ENOMEM;
3115 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3116 sizeof(struct nvme_fc_queue), GFP_KERNEL);
3117 if (!ctrl->queues)
3118 goto out_free_ida;
3119
3120 nvme_fc_init_queue(ctrl, 0);
3121
3122 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3123 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3124 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3125 ctrl->admin_tag_set.reserved_tags = 2;
3126 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
3127 ctrl->admin_tag_set.cmd_size =
3128 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
3129 ctrl->lport->ops->fcprqst_priv_sz);
3130 ctrl->admin_tag_set.driver_data = ctrl;
3131 ctrl->admin_tag_set.nr_hw_queues = 1;
3132 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
3133 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3134
3135 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3136 if (ret)
3137 goto out_free_queues;
3138 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3139
3140 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3141 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
3142 ret = PTR_ERR(ctrl->ctrl.fabrics_q);
3143 goto out_free_admin_tag_set;
3144 }
3145
3146 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3147 if (IS_ERR(ctrl->ctrl.admin_q)) {
3148 ret = PTR_ERR(ctrl->ctrl.admin_q);
3149 goto out_cleanup_fabrics_q;
3150 }
3151
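	/*
	 * The io queue tag set cannot be sized yet: the io queue count
	 * must first be negotiated with the controller, so its setup is
	 * deferred to the connect path.
	 */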
3159 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3160 if (ret)
3161 goto out_cleanup_admin_q;
3162
3163
3164
3165 spin_lock_irqsave(&rport->lock, flags);
3166 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3167 spin_unlock_irqrestore(&rport->lock, flags);
3168
3169 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3170 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3171 dev_err(ctrl->ctrl.device,
3172 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3173 goto fail_ctrl;
3174 }
3175
3176 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3177 dev_err(ctrl->ctrl.device,
3178 "NVME-FC{%d}: failed to schedule initial connect\n",
3179 ctrl->cnum);
3180 goto fail_ctrl;
3181 }
3182
3183 flush_delayed_work(&ctrl->connect_work);
3184
3185 dev_info(ctrl->ctrl.device,
3186 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3187 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3188
3189 return &ctrl->ctrl;
3190
3191fail_ctrl:
3192 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3193 cancel_work_sync(&ctrl->ctrl.reset_work);
3194 cancel_work_sync(&ctrl->err_work);
3195 cancel_delayed_work_sync(&ctrl->connect_work);
3196
3197 ctrl->ctrl.opts = NULL;
3198
3199
3200 nvme_uninit_ctrl(&ctrl->ctrl);
3201
3202
3203 nvme_put_ctrl(&ctrl->ctrl);
3204
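	/*
	 * The teardown path above also puts the rport reference, and the
	 * caller will do its own put when it sees the error return, so
	 * take an extra reference here to keep the counts balanced.
	 */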
3212 nvme_fc_rport_get(rport);
3213
3214 return ERR_PTR(-EIO);
3215
3216out_cleanup_admin_q:
3217 blk_cleanup_queue(ctrl->ctrl.admin_q);
3218out_cleanup_fabrics_q:
3219 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
3220out_free_admin_tag_set:
3221 blk_mq_free_tag_set(&ctrl->admin_tag_set);
3222out_free_queues:
3223 kfree(ctrl->queues);
3224out_free_ida:
3225 put_device(ctrl->dev);
3226 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3227out_free_ctrl:
3228 kfree(ctrl);
3229out_fail:
3230
3231 return ERR_PTR(ret);
3232}
3233
3234
3235struct nvmet_fc_traddr {
3236 u64 nn;
3237 u64 pn;
3238};
3239
3240static int
3241__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3242{
3243 u64 token64;
3244
3245 if (match_u64(sstr, &token64))
3246 return -EINVAL;
3247 *val = token64;
3248
3249 return 0;
3250}
3251
3252
3253
3254
3255
3256
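/*
 * Parse an FC transport address of the form
 * "nn-0x<16 hex digits>:pn-0x<16 hex digits>" (the "0x" prefixes are
 * optional) into its node name and port name.
 */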
3257static int
3258nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3259{
3260 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3261 substring_t wwn = { name, &name[sizeof(name)-1] };
3262 int nnoffset, pnoffset;
3263
3264
3265 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3266 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3267 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3268 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3269 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3270 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3271 NVME_FC_TRADDR_OXNNLEN;
3272 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3273 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3274 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3275 "pn-", NVME_FC_TRADDR_NNLEN))) {
3276 nnoffset = NVME_FC_TRADDR_NNLEN;
3277 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3278 } else
3279 goto out_einval;
3280
3281 name[0] = '0';
3282 name[1] = 'x';
3283 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3284
3285 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3286 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3287 goto out_einval;
3288
3289 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3290 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3291 goto out_einval;
3292
3293 return 0;
3294
3295out_einval:
3296 pr_warn("%s: bad traddr string\n", __func__);
3297 return -EINVAL;
3298}
3299
3300static struct nvme_ctrl *
3301nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3302{
3303 struct nvme_fc_lport *lport;
3304 struct nvme_fc_rport *rport;
3305 struct nvme_ctrl *ctrl;
3306 struct nvmet_fc_traddr laddr = { 0L, 0L };
3307 struct nvmet_fc_traddr raddr = { 0L, 0L };
3308 unsigned long flags;
3309 int ret;
3310
3311 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3312 if (ret || !raddr.nn || !raddr.pn)
3313 return ERR_PTR(-EINVAL);
3314
3315 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3316 if (ret || !laddr.nn || !laddr.pn)
3317 return ERR_PTR(-EINVAL);
3318
3319
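	/* find the matching local and remote ports */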
3320 spin_lock_irqsave(&nvme_fc_lock, flags);
3321 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3322 if (lport->localport.node_name != laddr.nn ||
3323 lport->localport.port_name != laddr.pn)
3324 continue;
3325
3326 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3327 if (rport->remoteport.node_name != raddr.nn ||
3328 rport->remoteport.port_name != raddr.pn)
3329 continue;
3330
3331
3332 if (!nvme_fc_rport_get(rport))
3333 break;
3334
3335 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3336
3337 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3338 if (IS_ERR(ctrl))
3339 nvme_fc_rport_put(rport);
3340 return ctrl;
3341 }
3342 }
3343 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3344
3345 pr_warn("%s: %s - %s combination not found\n",
3346 __func__, opts->traddr, opts->host_traddr);
3347 return ERR_PTR(-ENOENT);
3348}
3349
3350
3351static struct nvmf_transport_ops nvme_fc_transport = {
3352 .name = "fc",
3353 .module = THIS_MODULE,
3354 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3355 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3356 .create_ctrl = nvme_fc_create_ctrl,
3357};
3358
3359
3360#define DISCOVERY_MAX_FAIL 20
3361
3362static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
3363 struct device_attribute *attr, const char *buf, size_t count)
3364{
3365 unsigned long flags;
3366 LIST_HEAD(local_disc_list);
3367 struct nvme_fc_lport *lport;
3368 struct nvme_fc_rport *rport;
3369 int failcnt = 0;
3370
3371 spin_lock_irqsave(&nvme_fc_lock, flags);
3372restart:
3373 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3374 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3375 if (!nvme_fc_lport_get(lport))
3376 continue;
3377 if (!nvme_fc_rport_get(rport)) {
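			/*
			 * The rport is going away; drop the lport reference
			 * and restart the scan. Anything already moved to
			 * the local list is skipped on the rescan since its
			 * disc_list is no longer empty.
			 */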
3387 nvme_fc_lport_put(lport);
3388
3389 if (failcnt++ < DISCOVERY_MAX_FAIL)
3390 goto restart;
3391
3392 pr_err("nvme_discovery: too many reference "
3393 "failures\n");
3394 goto process_local_list;
3395 }
3396 if (list_empty(&rport->disc_list))
3397 list_add_tail(&rport->disc_list,
3398 &local_disc_list);
3399 }
3400 }
3401
3402process_local_list:
3403 while (!list_empty(&local_disc_list)) {
3404 rport = list_first_entry(&local_disc_list,
3405 struct nvme_fc_rport, disc_list);
3406 list_del_init(&rport->disc_list);
3407 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3408
3409 lport = rport->lport;
3410
3411 nvme_fc_signal_discovery_scan(lport, rport);
3412 nvme_fc_rport_put(rport);
3413 nvme_fc_lport_put(lport);
3414
3415 spin_lock_irqsave(&nvme_fc_lock, flags);
3416 }
3417 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3418
3419 return count;
3420}
3421static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
3422
3423static struct attribute *nvme_fc_attrs[] = {
3424 &dev_attr_nvme_discovery.attr,
3425 NULL
3426};
3427
3428static struct attribute_group nvme_fc_attr_group = {
3429 .attrs = nvme_fc_attrs,
3430};
3431
3432static const struct attribute_group *nvme_fc_attr_groups[] = {
3433 &nvme_fc_attr_group,
3434 NULL
3435};
3436
3437static struct class fc_class = {
3438 .name = "fc",
3439 .dev_groups = nvme_fc_attr_groups,
3440 .owner = THIS_MODULE,
3441};
3442
3443static int __init nvme_fc_init_module(void)
3444{
3445 int ret;
3446
3447 nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
3448 if (!nvme_fc_wq)
3449 return -ENOMEM;
3450
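	/*
	 * Register an "fc" class and a device under it so FC-specific
	 * udev events (such as nvme discovery requests) have somewhere
	 * to be posted until a generic FC class exists outside of SCSI.
	 */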
3465 ret = class_register(&fc_class);
3466 if (ret) {
3467 pr_err("couldn't register class fc\n");
3468 goto out_destroy_wq;
3469 }
3470
3471
3472
3473
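	/* create the device used to post FC-centric udev events */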
3474 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
3475 "fc_udev_device");
3476 if (IS_ERR(fc_udev_device)) {
3477 pr_err("couldn't create fc_udev device!\n");
3478 ret = PTR_ERR(fc_udev_device);
3479 goto out_destroy_class;
3480 }
3481
3482 ret = nvmf_register_transport(&nvme_fc_transport);
3483 if (ret)
3484 goto out_destroy_device;
3485
3486 return 0;
3487
3488out_destroy_device:
3489 device_destroy(&fc_class, MKDEV(0, 0));
3490out_destroy_class:
3491 class_unregister(&fc_class);
3492out_destroy_wq:
3493 destroy_workqueue(nvme_fc_wq);
3494
3495 return ret;
3496}
3497
3498static void
3499nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
3500{
3501 struct nvme_fc_ctrl *ctrl;
3502
3503 spin_lock(&rport->lock);
3504 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3505 dev_warn(ctrl->ctrl.device,
3506 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
3507 ctrl->cnum);
3508 nvme_delete_ctrl(&ctrl->ctrl);
3509 }
3510 spin_unlock(&rport->lock);
3511}
3512
3513static void
3514nvme_fc_cleanup_for_unload(void)
3515{
3516 struct nvme_fc_lport *lport;
3517 struct nvme_fc_rport *rport;
3518
3519 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3520 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3521 nvme_fc_delete_controllers(rport);
3522 }
3523 }
3524}
3525
3526static void __exit nvme_fc_exit_module(void)
3527{
3528 unsigned long flags;
3529 bool need_cleanup = false;
3530
3531 spin_lock_irqsave(&nvme_fc_lock, flags);
3532 nvme_fc_waiting_to_unload = true;
3533 if (!list_empty(&nvme_fc_lport_list)) {
3534 need_cleanup = true;
3535 nvme_fc_cleanup_for_unload();
3536 }
3537 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3538 if (need_cleanup) {
3539 pr_info("%s: waiting for ctlr deletes\n", __func__);
3540 wait_for_completion(&nvme_fc_unload_proceed);
3541 pr_info("%s: ctrl deletes complete\n", __func__);
3542 }
3543
3544 nvmf_unregister_transport(&nvme_fc_transport);
3545
3546 ida_destroy(&nvme_fc_local_port_cnt);
3547 ida_destroy(&nvme_fc_ctrl_cnt);
3548
3549 device_destroy(&fc_class, MKDEV(0, 0));
3550 class_unregister(&fc_class);
3551 destroy_workqueue(nvme_fc_wq);
3552}
3553
3554module_init(nvme_fc_init_module);
3555module_exit(nvme_fc_exit_module);
3556
3557MODULE_LICENSE("GPL v2");
3558