1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18#include <linux/module.h>
19#include <linux/parser.h>
20#include <uapi/scsi/fc/fc_fs.h>
21#include <uapi/scsi/fc/fc_els.h>
22#include <linux/delay.h>
23
24#include "nvme.h"
25#include "fabrics.h"
26#include <linux/nvme-fc-driver.h>
27#include <linux/nvme-fc.h>
28
29
30
31
32
33enum nvme_fc_queue_flags {
34 NVME_FC_Q_CONNECTED = 0,
35 NVME_FC_Q_LIVE,
36};
37
38#define NVMEFC_QUEUE_DELAY 3
39
40#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60
41
42struct nvme_fc_queue {
43 struct nvme_fc_ctrl *ctrl;
44 struct device *dev;
45 struct blk_mq_hw_ctx *hctx;
46 void *lldd_handle;
47 size_t cmnd_capsule_len;
48 u32 qnum;
49 u32 rqcnt;
50 u32 seqno;
51
52 u64 connection_id;
53 atomic_t csn;
54
55 unsigned long flags;
56} __aligned(sizeof(u64));
57
58enum nvme_fcop_flags {
59 FCOP_FLAGS_TERMIO = (1 << 0),
60 FCOP_FLAGS_RELEASED = (1 << 1),
61 FCOP_FLAGS_COMPLETE = (1 << 2),
62 FCOP_FLAGS_AEN = (1 << 3),
63};
64
65struct nvmefc_ls_req_op {
66 struct nvmefc_ls_req ls_req;
67
68 struct nvme_fc_rport *rport;
69 struct nvme_fc_queue *queue;
70 struct request *rq;
71 u32 flags;
72
73 int ls_error;
74 struct completion ls_done;
75 struct list_head lsreq_list;
76 bool req_queued;
77};
78
79enum nvme_fcpop_state {
80 FCPOP_STATE_UNINIT = 0,
81 FCPOP_STATE_IDLE = 1,
82 FCPOP_STATE_ACTIVE = 2,
83 FCPOP_STATE_ABORTED = 3,
84 FCPOP_STATE_COMPLETE = 4,
85};
86
87struct nvme_fc_fcp_op {
88 struct nvme_request nreq;
89
90
91
92
93
94
95
96 struct nvmefc_fcp_req fcp_req;
97
98 struct nvme_fc_ctrl *ctrl;
99 struct nvme_fc_queue *queue;
100 struct request *rq;
101
102 atomic_t state;
103 u32 flags;
104 u32 rqno;
105 u32 nents;
106
107 struct nvme_fc_cmd_iu cmd_iu;
108 struct nvme_fc_ersp_iu rsp_iu;
109};
110
111struct nvme_fc_lport {
112 struct nvme_fc_local_port localport;
113
114 struct ida endp_cnt;
115 struct list_head port_list;
116 struct list_head endp_list;
117 struct device *dev;
118 struct nvme_fc_port_template *ops;
119 struct kref ref;
120 atomic_t act_rport_cnt;
121} __aligned(sizeof(u64));
122
123struct nvme_fc_rport {
124 struct nvme_fc_remote_port remoteport;
125
126 struct list_head endp_list;
127 struct list_head ctrl_list;
128 struct list_head ls_req_list;
129 struct device *dev;
130 struct nvme_fc_lport *lport;
131 spinlock_t lock;
132 struct kref ref;
133 atomic_t act_ctrl_cnt;
134 unsigned long dev_loss_end;
135} __aligned(sizeof(u64));
136
137enum nvme_fcctrl_flags {
138 FCCTRL_TERMIO = (1 << 0),
139};
140
141struct nvme_fc_ctrl {
142 spinlock_t lock;
143 struct nvme_fc_queue *queues;
144 struct device *dev;
145 struct nvme_fc_lport *lport;
146 struct nvme_fc_rport *rport;
147 u32 cnum;
148
149 bool assoc_active;
150 u64 association_id;
151
152 struct list_head ctrl_list;
153
154 struct blk_mq_tag_set admin_tag_set;
155 struct blk_mq_tag_set tag_set;
156
157 struct delayed_work connect_work;
158
159 struct kref ref;
160 u32 flags;
161 u32 iocnt;
162 wait_queue_head_t ioabort_wait;
163
164 struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
165
166 struct nvme_ctrl ctrl;
167};
168
169static inline struct nvme_fc_ctrl *
170to_fc_ctrl(struct nvme_ctrl *ctrl)
171{
172 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
173}
174
175static inline struct nvme_fc_lport *
176localport_to_lport(struct nvme_fc_local_port *portptr)
177{
178 return container_of(portptr, struct nvme_fc_lport, localport);
179}
180
181static inline struct nvme_fc_rport *
182remoteport_to_rport(struct nvme_fc_remote_port *portptr)
183{
184 return container_of(portptr, struct nvme_fc_rport, remoteport);
185}
186
187static inline struct nvmefc_ls_req_op *
188ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
189{
190 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
191}
192
193static inline struct nvme_fc_fcp_op *
194fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
195{
196 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
197}
198
199
200
201
202
203
204static DEFINE_SPINLOCK(nvme_fc_lock);
205
206static LIST_HEAD(nvme_fc_lport_list);
207static DEFINE_IDA(nvme_fc_local_port_cnt);
208static DEFINE_IDA(nvme_fc_ctrl_cnt);
209
210
211
212
213
214
215
216static struct class *fc_class;
217static struct device *fc_udev_device;
218
219
220
221
222static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
223 struct nvme_fc_queue *, unsigned int);
224
225static void
226nvme_fc_free_lport(struct kref *ref)
227{
228 struct nvme_fc_lport *lport =
229 container_of(ref, struct nvme_fc_lport, ref);
230 unsigned long flags;
231
232 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
233 WARN_ON(!list_empty(&lport->endp_list));
234
235
236 spin_lock_irqsave(&nvme_fc_lock, flags);
237 list_del(&lport->port_list);
238 spin_unlock_irqrestore(&nvme_fc_lock, flags);
239
240 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
241 ida_destroy(&lport->endp_cnt);
242
243 put_device(lport->dev);
244
245 kfree(lport);
246}
247
248static void
249nvme_fc_lport_put(struct nvme_fc_lport *lport)
250{
251 kref_put(&lport->ref, nvme_fc_free_lport);
252}
253
254static int
255nvme_fc_lport_get(struct nvme_fc_lport *lport)
256{
257 return kref_get_unless_zero(&lport->ref);
258}
259
260
261static struct nvme_fc_lport *
262nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
263 struct nvme_fc_port_template *ops,
264 struct device *dev)
265{
266 struct nvme_fc_lport *lport;
267 unsigned long flags;
268
269 spin_lock_irqsave(&nvme_fc_lock, flags);
270
271 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
272 if (lport->localport.node_name != pinfo->node_name ||
273 lport->localport.port_name != pinfo->port_name)
274 continue;
275
276 if (lport->dev != dev) {
277 lport = ERR_PTR(-EXDEV);
278 goto out_done;
279 }
280
281 if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
282 lport = ERR_PTR(-EEXIST);
283 goto out_done;
284 }
285
286 if (!nvme_fc_lport_get(lport)) {
287
288
289
290
291 lport = NULL;
292 goto out_done;
293 }
294
295
296
297 lport->ops = ops;
298 lport->localport.port_role = pinfo->port_role;
299 lport->localport.port_id = pinfo->port_id;
300 lport->localport.port_state = FC_OBJSTATE_ONLINE;
301
302 spin_unlock_irqrestore(&nvme_fc_lock, flags);
303
304 return lport;
305 }
306
307 lport = NULL;
308
309out_done:
310 spin_unlock_irqrestore(&nvme_fc_lock, flags);
311
312 return lport;
313}
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332int
333nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
334 struct nvme_fc_port_template *template,
335 struct device *dev,
336 struct nvme_fc_local_port **portptr)
337{
338 struct nvme_fc_lport *newrec;
339 unsigned long flags;
340 int ret, idx;
341
342 if (!template->localport_delete || !template->remoteport_delete ||
343 !template->ls_req || !template->fcp_io ||
344 !template->ls_abort || !template->fcp_abort ||
345 !template->max_hw_queues || !template->max_sgl_segments ||
346 !template->max_dif_sgl_segments || !template->dma_boundary) {
347 ret = -EINVAL;
348 goto out_reghost_failed;
349 }
350
351
352
353
354
355
356
357
358 newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
359
360
361 if (IS_ERR(newrec)) {
362 ret = PTR_ERR(newrec);
363 goto out_reghost_failed;
364
365
366 } else if (newrec) {
367 *portptr = &newrec->localport;
368 return 0;
369 }
370
371
372
373 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
374 GFP_KERNEL);
375 if (!newrec) {
376 ret = -ENOMEM;
377 goto out_reghost_failed;
378 }
379
380 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
381 if (idx < 0) {
382 ret = -ENOSPC;
383 goto out_fail_kfree;
384 }
385
386 if (!get_device(dev) && dev) {
387 ret = -ENODEV;
388 goto out_ida_put;
389 }
390
391 INIT_LIST_HEAD(&newrec->port_list);
392 INIT_LIST_HEAD(&newrec->endp_list);
393 kref_init(&newrec->ref);
394 atomic_set(&newrec->act_rport_cnt, 0);
395 newrec->ops = template;
396 newrec->dev = dev;
397 ida_init(&newrec->endp_cnt);
398 newrec->localport.private = &newrec[1];
399 newrec->localport.node_name = pinfo->node_name;
400 newrec->localport.port_name = pinfo->port_name;
401 newrec->localport.port_role = pinfo->port_role;
402 newrec->localport.port_id = pinfo->port_id;
403 newrec->localport.port_state = FC_OBJSTATE_ONLINE;
404 newrec->localport.port_num = idx;
405
406 spin_lock_irqsave(&nvme_fc_lock, flags);
407 list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
408 spin_unlock_irqrestore(&nvme_fc_lock, flags);
409
410 if (dev)
411 dma_set_seg_boundary(dev, template->dma_boundary);
412
413 *portptr = &newrec->localport;
414 return 0;
415
416out_ida_put:
417 ida_simple_remove(&nvme_fc_local_port_cnt, idx);
418out_fail_kfree:
419 kfree(newrec);
420out_reghost_failed:
421 *portptr = NULL;
422
423 return ret;
424}
425EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
426
427
428
429
430
431
432
433
434
435
436
437
438int
439nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
440{
441 struct nvme_fc_lport *lport = localport_to_lport(portptr);
442 unsigned long flags;
443
444 if (!portptr)
445 return -EINVAL;
446
447 spin_lock_irqsave(&nvme_fc_lock, flags);
448
449 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
450 spin_unlock_irqrestore(&nvme_fc_lock, flags);
451 return -EINVAL;
452 }
453 portptr->port_state = FC_OBJSTATE_DELETED;
454
455 spin_unlock_irqrestore(&nvme_fc_lock, flags);
456
457 if (atomic_read(&lport->act_rport_cnt) == 0)
458 lport->ops->localport_delete(&lport->localport);
459
460 nvme_fc_lport_put(lport);
461
462 return 0;
463}
464EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
465
466
467
468
469
470
471
472
473
474#define FCNVME_TRADDR_LENGTH 64
475
476static void
477nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
478 struct nvme_fc_rport *rport)
479{
480 char hostaddr[FCNVME_TRADDR_LENGTH];
481 char tgtaddr[FCNVME_TRADDR_LENGTH];
482 char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };
483
484 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
485 return;
486
487 snprintf(hostaddr, sizeof(hostaddr),
488 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
489 lport->localport.node_name, lport->localport.port_name);
490 snprintf(tgtaddr, sizeof(tgtaddr),
491 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
492 rport->remoteport.node_name, rport->remoteport.port_name);
493 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
494}
495
496static void
497nvme_fc_free_rport(struct kref *ref)
498{
499 struct nvme_fc_rport *rport =
500 container_of(ref, struct nvme_fc_rport, ref);
501 struct nvme_fc_lport *lport =
502 localport_to_lport(rport->remoteport.localport);
503 unsigned long flags;
504
505 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
506 WARN_ON(!list_empty(&rport->ctrl_list));
507
508
509 spin_lock_irqsave(&nvme_fc_lock, flags);
510 list_del(&rport->endp_list);
511 spin_unlock_irqrestore(&nvme_fc_lock, flags);
512
513 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
514
515 kfree(rport);
516
517 nvme_fc_lport_put(lport);
518}
519
520static void
521nvme_fc_rport_put(struct nvme_fc_rport *rport)
522{
523 kref_put(&rport->ref, nvme_fc_free_rport);
524}
525
526static int
527nvme_fc_rport_get(struct nvme_fc_rport *rport)
528{
529 return kref_get_unless_zero(&rport->ref);
530}
531
532static void
533nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
534{
535 switch (ctrl->ctrl.state) {
536 case NVME_CTRL_NEW:
537 case NVME_CTRL_RECONNECTING:
538
539
540
541
542 dev_info(ctrl->ctrl.device,
543 "NVME-FC{%d}: connectivity re-established. "
544 "Attempting reconnect\n", ctrl->cnum);
545
546 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
547 break;
548
549 case NVME_CTRL_RESETTING:
550
551
552
553
554
555 break;
556
557 default:
558
559 break;
560 }
561}
562
563static struct nvme_fc_rport *
564nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
565 struct nvme_fc_port_info *pinfo)
566{
567 struct nvme_fc_rport *rport;
568 struct nvme_fc_ctrl *ctrl;
569 unsigned long flags;
570
571 spin_lock_irqsave(&nvme_fc_lock, flags);
572
573 list_for_each_entry(rport, &lport->endp_list, endp_list) {
574 if (rport->remoteport.node_name != pinfo->node_name ||
575 rport->remoteport.port_name != pinfo->port_name)
576 continue;
577
578 if (!nvme_fc_rport_get(rport)) {
579 rport = ERR_PTR(-ENOLCK);
580 goto out_done;
581 }
582
583 spin_unlock_irqrestore(&nvme_fc_lock, flags);
584
585 spin_lock_irqsave(&rport->lock, flags);
586
587
588 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
589
590 spin_unlock_irqrestore(&rport->lock, flags);
591 nvme_fc_rport_put(rport);
592 return ERR_PTR(-ESTALE);
593 }
594
595 rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
596 rport->dev_loss_end = 0;
597
598
599
600
601
602 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
603 nvme_fc_resume_controller(ctrl);
604
605 spin_unlock_irqrestore(&rport->lock, flags);
606
607 return rport;
608 }
609
610 rport = NULL;
611
612out_done:
613 spin_unlock_irqrestore(&nvme_fc_lock, flags);
614
615 return rport;
616}
617
618static inline void
619__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
620 struct nvme_fc_port_info *pinfo)
621{
622 if (pinfo->dev_loss_tmo)
623 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
624 else
625 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
626}
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644int
645nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
646 struct nvme_fc_port_info *pinfo,
647 struct nvme_fc_remote_port **portptr)
648{
649 struct nvme_fc_lport *lport = localport_to_lport(localport);
650 struct nvme_fc_rport *newrec;
651 unsigned long flags;
652 int ret, idx;
653
654 if (!nvme_fc_lport_get(lport)) {
655 ret = -ESHUTDOWN;
656 goto out_reghost_failed;
657 }
658
659
660
661
662
663
664 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
665
666
667 if (IS_ERR(newrec)) {
668 ret = PTR_ERR(newrec);
669 goto out_lport_put;
670
671
672 } else if (newrec) {
673 nvme_fc_lport_put(lport);
674 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
675 nvme_fc_signal_discovery_scan(lport, newrec);
676 *portptr = &newrec->remoteport;
677 return 0;
678 }
679
680
681
682 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
683 GFP_KERNEL);
684 if (!newrec) {
685 ret = -ENOMEM;
686 goto out_lport_put;
687 }
688
689 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
690 if (idx < 0) {
691 ret = -ENOSPC;
692 goto out_kfree_rport;
693 }
694
695 INIT_LIST_HEAD(&newrec->endp_list);
696 INIT_LIST_HEAD(&newrec->ctrl_list);
697 INIT_LIST_HEAD(&newrec->ls_req_list);
698 kref_init(&newrec->ref);
699 atomic_set(&newrec->act_ctrl_cnt, 0);
700 spin_lock_init(&newrec->lock);
701 newrec->remoteport.localport = &lport->localport;
702 newrec->dev = lport->dev;
703 newrec->lport = lport;
704 newrec->remoteport.private = &newrec[1];
705 newrec->remoteport.port_role = pinfo->port_role;
706 newrec->remoteport.node_name = pinfo->node_name;
707 newrec->remoteport.port_name = pinfo->port_name;
708 newrec->remoteport.port_id = pinfo->port_id;
709 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
710 newrec->remoteport.port_num = idx;
711 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
712
713 spin_lock_irqsave(&nvme_fc_lock, flags);
714 list_add_tail(&newrec->endp_list, &lport->endp_list);
715 spin_unlock_irqrestore(&nvme_fc_lock, flags);
716
717 nvme_fc_signal_discovery_scan(lport, newrec);
718
719 *portptr = &newrec->remoteport;
720 return 0;
721
722out_kfree_rport:
723 kfree(newrec);
724out_lport_put:
725 nvme_fc_lport_put(lport);
726out_reghost_failed:
727 *portptr = NULL;
728 return ret;
729}
730EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
731
732static int
733nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
734{
735 struct nvmefc_ls_req_op *lsop;
736 unsigned long flags;
737
738restart:
739 spin_lock_irqsave(&rport->lock, flags);
740
741 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
742 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
743 lsop->flags |= FCOP_FLAGS_TERMIO;
744 spin_unlock_irqrestore(&rport->lock, flags);
745 rport->lport->ops->ls_abort(&rport->lport->localport,
746 &rport->remoteport,
747 &lsop->ls_req);
748 goto restart;
749 }
750 }
751 spin_unlock_irqrestore(&rport->lock, flags);
752
753 return 0;
754}
755
756static void
757nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
758{
759 dev_info(ctrl->ctrl.device,
760 "NVME-FC{%d}: controller connectivity lost. Awaiting "
761 "Reconnect", ctrl->cnum);
762
763 switch (ctrl->ctrl.state) {
764 case NVME_CTRL_NEW:
765 case NVME_CTRL_LIVE:
766
767
768
769
770
771
772
773 if (nvme_reset_ctrl(&ctrl->ctrl)) {
774 dev_warn(ctrl->ctrl.device,
775 "NVME-FC{%d}: Couldn't schedule reset. "
776 "Deleting controller.\n",
777 ctrl->cnum);
778 nvme_delete_ctrl(&ctrl->ctrl);
779 }
780 break;
781
782 case NVME_CTRL_RECONNECTING:
783
784
785
786
787
788
789
790 break;
791
792 case NVME_CTRL_RESETTING:
793
794
795
796
797
798
799 break;
800
801 case NVME_CTRL_DELETING:
802 default:
803
804 break;
805 }
806}
807
808
809
810
811
812
813
814
815
816
817
818
819int
820nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
821{
822 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
823 struct nvme_fc_ctrl *ctrl;
824 unsigned long flags;
825
826 if (!portptr)
827 return -EINVAL;
828
829 spin_lock_irqsave(&rport->lock, flags);
830
831 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
832 spin_unlock_irqrestore(&rport->lock, flags);
833 return -EINVAL;
834 }
835 portptr->port_state = FC_OBJSTATE_DELETED;
836
837 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);
838
839 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
840
841 if (!portptr->dev_loss_tmo) {
842 dev_warn(ctrl->ctrl.device,
843 "NVME-FC{%d}: controller connectivity lost. "
844 "Deleting controller.\n",
845 ctrl->cnum);
846 nvme_delete_ctrl(&ctrl->ctrl);
847 } else
848 nvme_fc_ctrl_connectivity_loss(ctrl);
849 }
850
851 spin_unlock_irqrestore(&rport->lock, flags);
852
853 nvme_fc_abort_lsops(rport);
854
855 if (atomic_read(&rport->act_ctrl_cnt) == 0)
856 rport->lport->ops->remoteport_delete(portptr);
857
858
859
860
861
862
863 nvme_fc_rport_put(rport);
864
865 return 0;
866}
867EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
868
869
870
871
872
873
874
875
876
877void
878nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
879{
880 struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
881
882 nvme_fc_signal_discovery_scan(rport->lport, rport);
883}
884EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
885
886int
887nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
888 u32 dev_loss_tmo)
889{
890 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
891 unsigned long flags;
892
893 spin_lock_irqsave(&rport->lock, flags);
894
895 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
896 spin_unlock_irqrestore(&rport->lock, flags);
897 return -EINVAL;
898 }
899
900
901 rport->remoteport.dev_loss_tmo = dev_loss_tmo;
902
903 spin_unlock_irqrestore(&rport->lock, flags);
904
905 return 0;
906}
907EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928static inline dma_addr_t
929fc_dma_map_single(struct device *dev, void *ptr, size_t size,
930 enum dma_data_direction dir)
931{
932 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
933}
934
935static inline int
936fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
937{
938 return dev ? dma_mapping_error(dev, dma_addr) : 0;
939}
940
941static inline void
942fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
943 enum dma_data_direction dir)
944{
945 if (dev)
946 dma_unmap_single(dev, addr, size, dir);
947}
948
949static inline void
950fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
951 enum dma_data_direction dir)
952{
953 if (dev)
954 dma_sync_single_for_cpu(dev, addr, size, dir);
955}
956
957static inline void
958fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
959 enum dma_data_direction dir)
960{
961 if (dev)
962 dma_sync_single_for_device(dev, addr, size, dir);
963}
964
965
966static int
967fc_map_sg(struct scatterlist *sg, int nents)
968{
969 struct scatterlist *s;
970 int i;
971
972 WARN_ON(nents == 0 || sg[0].length == 0);
973
974 for_each_sg(sg, s, nents, i) {
975 s->dma_address = 0L;
976#ifdef CONFIG_NEED_SG_DMA_LENGTH
977 s->dma_length = s->length;
978#endif
979 }
980 return nents;
981}
982
983static inline int
984fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
985 enum dma_data_direction dir)
986{
987 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
988}
989
990static inline void
991fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
992 enum dma_data_direction dir)
993{
994 if (dev)
995 dma_unmap_sg(dev, sg, nents, dir);
996}
997
998
999
1000static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
1001static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
1002
1003
1004static void
1005__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
1006{
1007 struct nvme_fc_rport *rport = lsop->rport;
1008 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1009 unsigned long flags;
1010
1011 spin_lock_irqsave(&rport->lock, flags);
1012
1013 if (!lsop->req_queued) {
1014 spin_unlock_irqrestore(&rport->lock, flags);
1015 return;
1016 }
1017
1018 list_del(&lsop->lsreq_list);
1019
1020 lsop->req_queued = false;
1021
1022 spin_unlock_irqrestore(&rport->lock, flags);
1023
1024 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1025 (lsreq->rqstlen + lsreq->rsplen),
1026 DMA_BIDIRECTIONAL);
1027
1028 nvme_fc_rport_put(rport);
1029}
1030
1031static int
1032__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
1033 struct nvmefc_ls_req_op *lsop,
1034 void (*done)(struct nvmefc_ls_req *req, int status))
1035{
1036 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1037 unsigned long flags;
1038 int ret = 0;
1039
1040 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1041 return -ECONNREFUSED;
1042
1043 if (!nvme_fc_rport_get(rport))
1044 return -ESHUTDOWN;
1045
1046 lsreq->done = done;
1047 lsop->rport = rport;
1048 lsop->req_queued = false;
1049 INIT_LIST_HEAD(&lsop->lsreq_list);
1050 init_completion(&lsop->ls_done);
1051
1052 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
1053 lsreq->rqstlen + lsreq->rsplen,
1054 DMA_BIDIRECTIONAL);
1055 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
1056 ret = -EFAULT;
1057 goto out_putrport;
1058 }
1059 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
1060
1061 spin_lock_irqsave(&rport->lock, flags);
1062
1063 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
1064
1065 lsop->req_queued = true;
1066
1067 spin_unlock_irqrestore(&rport->lock, flags);
1068
1069 ret = rport->lport->ops->ls_req(&rport->lport->localport,
1070 &rport->remoteport, lsreq);
1071 if (ret)
1072 goto out_unlink;
1073
1074 return 0;
1075
1076out_unlink:
1077 lsop->ls_error = ret;
1078 spin_lock_irqsave(&rport->lock, flags);
1079 lsop->req_queued = false;
1080 list_del(&lsop->lsreq_list);
1081 spin_unlock_irqrestore(&rport->lock, flags);
1082 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1083 (lsreq->rqstlen + lsreq->rsplen),
1084 DMA_BIDIRECTIONAL);
1085out_putrport:
1086 nvme_fc_rport_put(rport);
1087
1088 return ret;
1089}
1090
1091static void
1092nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
1093{
1094 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1095
1096 lsop->ls_error = status;
1097 complete(&lsop->ls_done);
1098}
1099
1100static int
1101nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
1102{
1103 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1104 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
1105 int ret;
1106
1107 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
1108
1109 if (!ret) {
1110
1111
1112
1113
1114
1115
1116 wait_for_completion(&lsop->ls_done);
1117
1118 __nvme_fc_finish_ls_req(lsop);
1119
1120 ret = lsop->ls_error;
1121 }
1122
1123 if (ret)
1124 return ret;
1125
1126
1127 if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
1128 return -ENXIO;
1129
1130 return 0;
1131}
1132
1133static int
1134nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
1135 struct nvmefc_ls_req_op *lsop,
1136 void (*done)(struct nvmefc_ls_req *req, int status))
1137{
1138
1139
1140 return __nvme_fc_send_ls_req(rport, lsop, done);
1141}
1142
1143
1144enum {
1145 VERR_NO_ERROR = 0,
1146 VERR_LSACC = 1,
1147 VERR_LSDESC_RQST = 2,
1148 VERR_LSDESC_RQST_LEN = 3,
1149 VERR_ASSOC_ID = 4,
1150 VERR_ASSOC_ID_LEN = 5,
1151 VERR_CONN_ID = 6,
1152 VERR_CONN_ID_LEN = 7,
1153 VERR_CR_ASSOC = 8,
1154 VERR_CR_ASSOC_ACC_LEN = 9,
1155 VERR_CR_CONN = 10,
1156 VERR_CR_CONN_ACC_LEN = 11,
1157 VERR_DISCONN = 12,
1158 VERR_DISCONN_ACC_LEN = 13,
1159};
1160
1161static char *validation_errors[] = {
1162 "OK",
1163 "Not LS_ACC",
1164 "Not LSDESC_RQST",
1165 "Bad LSDESC_RQST Length",
1166 "Not Association ID",
1167 "Bad Association ID Length",
1168 "Not Connection ID",
1169 "Bad Connection ID Length",
1170 "Not CR_ASSOC Rqst",
1171 "Bad CR_ASSOC ACC Length",
1172 "Not CR_CONN Rqst",
1173 "Bad CR_CONN ACC Length",
1174 "Not Disconnect Rqst",
1175 "Bad Disconnect ACC Length",
1176};
1177
1178static int
1179nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
1180 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
1181{
1182 struct nvmefc_ls_req_op *lsop;
1183 struct nvmefc_ls_req *lsreq;
1184 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
1185 struct fcnvme_ls_cr_assoc_acc *assoc_acc;
1186 int ret, fcret = 0;
1187
1188 lsop = kzalloc((sizeof(*lsop) +
1189 ctrl->lport->ops->lsrqst_priv_sz +
1190 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
1191 if (!lsop) {
1192 ret = -ENOMEM;
1193 goto out_no_memory;
1194 }
1195 lsreq = &lsop->ls_req;
1196
1197 lsreq->private = (void *)&lsop[1];
1198 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
1199 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1200 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
1201
1202 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
1203 assoc_rqst->desc_list_len =
1204 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1205
1206 assoc_rqst->assoc_cmd.desc_tag =
1207 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
1208 assoc_rqst->assoc_cmd.desc_len =
1209 fcnvme_lsdesc_len(
1210 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1211
1212 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1213 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
1214
1215 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
1216 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
1217 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
1218 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
1219 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
1220 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
1221
1222 lsop->queue = queue;
1223 lsreq->rqstaddr = assoc_rqst;
1224 lsreq->rqstlen = sizeof(*assoc_rqst);
1225 lsreq->rspaddr = assoc_acc;
1226 lsreq->rsplen = sizeof(*assoc_acc);
1227 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1228
1229 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1230 if (ret)
1231 goto out_free_buffer;
1232
1233
1234
1235
1236 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1237 fcret = VERR_LSACC;
1238 else if (assoc_acc->hdr.desc_list_len !=
1239 fcnvme_lsdesc_len(
1240 sizeof(struct fcnvme_ls_cr_assoc_acc)))
1241 fcret = VERR_CR_ASSOC_ACC_LEN;
1242 else if (assoc_acc->hdr.rqst.desc_tag !=
1243 cpu_to_be32(FCNVME_LSDESC_RQST))
1244 fcret = VERR_LSDESC_RQST;
1245 else if (assoc_acc->hdr.rqst.desc_len !=
1246 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1247 fcret = VERR_LSDESC_RQST_LEN;
1248 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
1249 fcret = VERR_CR_ASSOC;
1250 else if (assoc_acc->associd.desc_tag !=
1251 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1252 fcret = VERR_ASSOC_ID;
1253 else if (assoc_acc->associd.desc_len !=
1254 fcnvme_lsdesc_len(
1255 sizeof(struct fcnvme_lsdesc_assoc_id)))
1256 fcret = VERR_ASSOC_ID_LEN;
1257 else if (assoc_acc->connectid.desc_tag !=
1258 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1259 fcret = VERR_CONN_ID;
1260 else if (assoc_acc->connectid.desc_len !=
1261 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1262 fcret = VERR_CONN_ID_LEN;
1263
1264 if (fcret) {
1265 ret = -EBADF;
1266 dev_err(ctrl->dev,
1267 "q %d connect failed: %s\n",
1268 queue->qnum, validation_errors[fcret]);
1269 } else {
1270 ctrl->association_id =
1271 be64_to_cpu(assoc_acc->associd.association_id);
1272 queue->connection_id =
1273 be64_to_cpu(assoc_acc->connectid.connection_id);
1274 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1275 }
1276
1277out_free_buffer:
1278 kfree(lsop);
1279out_no_memory:
1280 if (ret)
1281 dev_err(ctrl->dev,
1282 "queue %d connect admin queue failed (%d).\n",
1283 queue->qnum, ret);
1284 return ret;
1285}
1286
1287static int
1288nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1289 u16 qsize, u16 ersp_ratio)
1290{
1291 struct nvmefc_ls_req_op *lsop;
1292 struct nvmefc_ls_req *lsreq;
1293 struct fcnvme_ls_cr_conn_rqst *conn_rqst;
1294 struct fcnvme_ls_cr_conn_acc *conn_acc;
1295 int ret, fcret = 0;
1296
1297 lsop = kzalloc((sizeof(*lsop) +
1298 ctrl->lport->ops->lsrqst_priv_sz +
1299 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
1300 if (!lsop) {
1301 ret = -ENOMEM;
1302 goto out_no_memory;
1303 }
1304 lsreq = &lsop->ls_req;
1305
1306 lsreq->private = (void *)&lsop[1];
1307 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
1308 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1309 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
1310
1311 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
1312 conn_rqst->desc_list_len = cpu_to_be32(
1313 sizeof(struct fcnvme_lsdesc_assoc_id) +
1314 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1315
1316 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1317 conn_rqst->associd.desc_len =
1318 fcnvme_lsdesc_len(
1319 sizeof(struct fcnvme_lsdesc_assoc_id));
1320 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1321 conn_rqst->connect_cmd.desc_tag =
1322 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
1323 conn_rqst->connect_cmd.desc_len =
1324 fcnvme_lsdesc_len(
1325 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1326 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1327 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
1328 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
1329
1330 lsop->queue = queue;
1331 lsreq->rqstaddr = conn_rqst;
1332 lsreq->rqstlen = sizeof(*conn_rqst);
1333 lsreq->rspaddr = conn_acc;
1334 lsreq->rsplen = sizeof(*conn_acc);
1335 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1336
1337 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1338 if (ret)
1339 goto out_free_buffer;
1340
1341
1342
1343
1344 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1345 fcret = VERR_LSACC;
1346 else if (conn_acc->hdr.desc_list_len !=
1347 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1348 fcret = VERR_CR_CONN_ACC_LEN;
1349 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1350 fcret = VERR_LSDESC_RQST;
1351 else if (conn_acc->hdr.rqst.desc_len !=
1352 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1353 fcret = VERR_LSDESC_RQST_LEN;
1354 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1355 fcret = VERR_CR_CONN;
1356 else if (conn_acc->connectid.desc_tag !=
1357 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1358 fcret = VERR_CONN_ID;
1359 else if (conn_acc->connectid.desc_len !=
1360 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1361 fcret = VERR_CONN_ID_LEN;
1362
1363 if (fcret) {
1364 ret = -EBADF;
1365 dev_err(ctrl->dev,
1366 "q %d connect failed: %s\n",
1367 queue->qnum, validation_errors[fcret]);
1368 } else {
1369 queue->connection_id =
1370 be64_to_cpu(conn_acc->connectid.connection_id);
1371 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1372 }
1373
1374out_free_buffer:
1375 kfree(lsop);
1376out_no_memory:
1377 if (ret)
1378 dev_err(ctrl->dev,
1379 "queue %d connect command failed (%d).\n",
1380 queue->qnum, ret);
1381 return ret;
1382}
1383
1384static void
1385nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1386{
1387 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1388
1389 __nvme_fc_finish_ls_req(lsop);
1390
1391
1392
1393 kfree(lsop);
1394}
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413static void
1414nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1415{
1416 struct fcnvme_ls_disconnect_rqst *discon_rqst;
1417 struct fcnvme_ls_disconnect_acc *discon_acc;
1418 struct nvmefc_ls_req_op *lsop;
1419 struct nvmefc_ls_req *lsreq;
1420 int ret;
1421
1422 lsop = kzalloc((sizeof(*lsop) +
1423 ctrl->lport->ops->lsrqst_priv_sz +
1424 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1425 GFP_KERNEL);
1426 if (!lsop)
1427
1428 return;
1429
1430 lsreq = &lsop->ls_req;
1431
1432 lsreq->private = (void *)&lsop[1];
1433 discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1434 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1435 discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1436
1437 discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1438 discon_rqst->desc_list_len = cpu_to_be32(
1439 sizeof(struct fcnvme_lsdesc_assoc_id) +
1440 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1441
1442 discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1443 discon_rqst->associd.desc_len =
1444 fcnvme_lsdesc_len(
1445 sizeof(struct fcnvme_lsdesc_assoc_id));
1446
1447 discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1448
1449 discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1450 FCNVME_LSDESC_DISCONN_CMD);
1451 discon_rqst->discon_cmd.desc_len =
1452 fcnvme_lsdesc_len(
1453 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1454 discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1455 discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1456
1457 lsreq->rqstaddr = discon_rqst;
1458 lsreq->rqstlen = sizeof(*discon_rqst);
1459 lsreq->rspaddr = discon_acc;
1460 lsreq->rsplen = sizeof(*discon_acc);
1461 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1462
1463 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1464 nvme_fc_disconnect_assoc_done);
1465 if (ret)
1466 kfree(lsop);
1467
1468
1469 ctrl->association_id = 0;
1470}
1471
1472
1473
1474
1475static void __nvme_fc_final_op_cleanup(struct request *rq);
1476static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1477
1478static int
1479nvme_fc_reinit_request(void *data, struct request *rq)
1480{
1481 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1482 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1483
1484 memset(cmdiu, 0, sizeof(*cmdiu));
1485 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1486 cmdiu->fc_id = NVME_CMD_FC_ID;
1487 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1488 memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
1489
1490 return 0;
1491}
1492
1493static void
1494__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1495 struct nvme_fc_fcp_op *op)
1496{
1497 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1498 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1499 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1500 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1501
1502 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1503}
1504
1505static void
1506nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1507 unsigned int hctx_idx)
1508{
1509 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1510
1511 return __nvme_fc_exit_request(set->driver_data, op);
1512}
1513
1514static int
1515__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1516{
1517 int state;
1518
1519 state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1520 if (state != FCPOP_STATE_ACTIVE) {
1521 atomic_set(&op->state, state);
1522 return -ECANCELED;
1523 }
1524
1525 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1526 &ctrl->rport->remoteport,
1527 op->queue->lldd_handle,
1528 &op->fcp_req);
1529
1530 return 0;
1531}
1532
1533static void
1534nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1535{
1536 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1537 unsigned long flags;
1538 int i, ret;
1539
1540 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1541 if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
1542 continue;
1543
1544 spin_lock_irqsave(&ctrl->lock, flags);
1545 if (ctrl->flags & FCCTRL_TERMIO) {
1546 ctrl->iocnt++;
1547 aen_op->flags |= FCOP_FLAGS_TERMIO;
1548 }
1549 spin_unlock_irqrestore(&ctrl->lock, flags);
1550
1551 ret = __nvme_fc_abort_op(ctrl, aen_op);
1552 if (ret) {
1553
1554
1555
1556
1557
1558
1559
1560 spin_lock_irqsave(&ctrl->lock, flags);
1561 if (ctrl->flags & FCCTRL_TERMIO)
1562 ctrl->iocnt--;
1563 aen_op->flags &= ~FCOP_FLAGS_TERMIO;
1564 spin_unlock_irqrestore(&ctrl->lock, flags);
1565 return;
1566 }
1567 }
1568}
1569
1570static inline int
1571__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1572 struct nvme_fc_fcp_op *op)
1573{
1574 unsigned long flags;
1575 bool complete_rq = false;
1576
1577 spin_lock_irqsave(&ctrl->lock, flags);
1578 if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1579 if (ctrl->flags & FCCTRL_TERMIO) {
1580 if (!--ctrl->iocnt)
1581 wake_up(&ctrl->ioabort_wait);
1582 }
1583 }
1584 if (op->flags & FCOP_FLAGS_RELEASED)
1585 complete_rq = true;
1586 else
1587 op->flags |= FCOP_FLAGS_COMPLETE;
1588 spin_unlock_irqrestore(&ctrl->lock, flags);
1589
1590 return complete_rq;
1591}
1592
1593static void
1594nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1595{
1596 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1597 struct request *rq = op->rq;
1598 struct nvmefc_fcp_req *freq = &op->fcp_req;
1599 struct nvme_fc_ctrl *ctrl = op->ctrl;
1600 struct nvme_fc_queue *queue = op->queue;
1601 struct nvme_completion *cqe = &op->rsp_iu.cqe;
1602 struct nvme_command *sqe = &op->cmd_iu.sqe;
1603 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1604 union nvme_result result;
1605 bool terminate_assoc = true;
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1645 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1646
1647 if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
1648 op->flags & FCOP_FLAGS_TERMIO)
1649 status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
1650 else if (freq->status)
1651 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1652
1653
1654
1655
1656
1657
1658 if (status)
1659 goto done;
1660
1661
1662
1663
1664
1665
1666
1667
1668 switch (freq->rcv_rsplen) {
1669
1670 case 0:
1671 case NVME_FC_SIZEOF_ZEROS_RSP:
1672
1673
1674
1675
1676
1677 if (freq->transferred_length !=
1678 be32_to_cpu(op->cmd_iu.data_len)) {
1679 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1680 goto done;
1681 }
1682 result.u64 = 0;
1683 break;
1684
1685 case sizeof(struct nvme_fc_ersp_iu):
1686
1687
1688
1689
1690 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1691 (freq->rcv_rsplen / 4) ||
1692 be32_to_cpu(op->rsp_iu.xfrd_len) !=
1693 freq->transferred_length ||
1694 op->rsp_iu.status_code ||
1695 sqe->common.command_id != cqe->command_id)) {
1696 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1697 goto done;
1698 }
1699 result = cqe->result;
1700 status = cqe->status;
1701 break;
1702
1703 default:
1704 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1705 goto done;
1706 }
1707
1708 terminate_assoc = false;
1709
1710done:
1711 if (op->flags & FCOP_FLAGS_AEN) {
1712 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1713 __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1714 atomic_set(&op->state, FCPOP_STATE_IDLE);
1715 op->flags = FCOP_FLAGS_AEN;
1716 nvme_fc_ctrl_put(ctrl);
1717 goto check_error;
1718 }
1719
1720
1721
1722
1723
1724 if (status &&
1725 (blk_queue_dying(rq->q) ||
1726 ctrl->ctrl.state == NVME_CTRL_NEW ||
1727 ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
1728 status |= cpu_to_le16(NVME_SC_DNR << 1);
1729
1730 if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
1731 __nvme_fc_final_op_cleanup(rq);
1732 else
1733 nvme_end_request(rq, status, result);
1734
1735check_error:
1736 if (terminate_assoc)
1737 nvme_fc_error_recovery(ctrl, "transport detected io error");
1738}
1739
1740static int
1741__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1742 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1743 struct request *rq, u32 rqno)
1744{
1745 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1746 int ret = 0;
1747
1748 memset(op, 0, sizeof(*op));
1749 op->fcp_req.cmdaddr = &op->cmd_iu;
1750 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1751 op->fcp_req.rspaddr = &op->rsp_iu;
1752 op->fcp_req.rsplen = sizeof(op->rsp_iu);
1753 op->fcp_req.done = nvme_fc_fcpio_done;
1754 op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
1755 op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
1756 op->ctrl = ctrl;
1757 op->queue = queue;
1758 op->rq = rq;
1759 op->rqno = rqno;
1760
1761 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1762 cmdiu->fc_id = NVME_CMD_FC_ID;
1763 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1764
1765 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1766 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1767 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1768 dev_err(ctrl->dev,
1769 "FCP Op failed - cmdiu dma mapping failed.\n");
1770 ret = EFAULT;
1771 goto out_on_error;
1772 }
1773
1774 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1775 &op->rsp_iu, sizeof(op->rsp_iu),
1776 DMA_FROM_DEVICE);
1777 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1778 dev_err(ctrl->dev,
1779 "FCP Op failed - rspiu dma mapping failed.\n");
1780 ret = EFAULT;
1781 }
1782
1783 atomic_set(&op->state, FCPOP_STATE_IDLE);
1784out_on_error:
1785 return ret;
1786}
1787
1788static int
1789nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1790 unsigned int hctx_idx, unsigned int numa_node)
1791{
1792 struct nvme_fc_ctrl *ctrl = set->driver_data;
1793 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1794 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
1795 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
1796
1797 return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1798}
1799
1800static int
1801nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1802{
1803 struct nvme_fc_fcp_op *aen_op;
1804 struct nvme_fc_cmd_iu *cmdiu;
1805 struct nvme_command *sqe;
1806 void *private;
1807 int i, ret;
1808
1809 aen_op = ctrl->aen_ops;
1810 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1811 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1812 GFP_KERNEL);
1813 if (!private)
1814 return -ENOMEM;
1815
1816 cmdiu = &aen_op->cmd_iu;
1817 sqe = &cmdiu->sqe;
1818 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1819 aen_op, (struct request *)NULL,
1820 (NVME_AQ_BLK_MQ_DEPTH + i));
1821 if (ret) {
1822 kfree(private);
1823 return ret;
1824 }
1825
1826 aen_op->flags = FCOP_FLAGS_AEN;
1827 aen_op->fcp_req.first_sgl = NULL;
1828 aen_op->fcp_req.private = private;
1829
1830 memset(sqe, 0, sizeof(*sqe));
1831 sqe->common.opcode = nvme_admin_async_event;
1832
1833 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
1834 }
1835 return 0;
1836}
1837
1838static void
1839nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1840{
1841 struct nvme_fc_fcp_op *aen_op;
1842 int i;
1843
1844 aen_op = ctrl->aen_ops;
1845 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1846 if (!aen_op->fcp_req.private)
1847 continue;
1848
1849 __nvme_fc_exit_request(ctrl, aen_op);
1850
1851 kfree(aen_op->fcp_req.private);
1852 aen_op->fcp_req.private = NULL;
1853 }
1854}
1855
1856static inline void
1857__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1858 unsigned int qidx)
1859{
1860 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1861
1862 hctx->driver_data = queue;
1863 queue->hctx = hctx;
1864}
1865
1866static int
1867nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1868 unsigned int hctx_idx)
1869{
1870 struct nvme_fc_ctrl *ctrl = data;
1871
1872 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1873
1874 return 0;
1875}
1876
1877static int
1878nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1879 unsigned int hctx_idx)
1880{
1881 struct nvme_fc_ctrl *ctrl = data;
1882
1883 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1884
1885 return 0;
1886}
1887
1888static void
1889nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
1890{
1891 struct nvme_fc_queue *queue;
1892
1893 queue = &ctrl->queues[idx];
1894 memset(queue, 0, sizeof(*queue));
1895 queue->ctrl = ctrl;
1896 queue->qnum = idx;
1897 atomic_set(&queue->csn, 1);
1898 queue->dev = ctrl->dev;
1899
1900 if (idx > 0)
1901 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1902 else
1903 queue->cmnd_capsule_len = sizeof(struct nvme_command);
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915}
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925static void
1926nvme_fc_free_queue(struct nvme_fc_queue *queue)
1927{
1928 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1929 return;
1930
1931 clear_bit(NVME_FC_Q_LIVE, &queue->flags);
1932
1933
1934
1935
1936
1937
1938 queue->connection_id = 0;
1939}
1940
1941static void
1942__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1943 struct nvme_fc_queue *queue, unsigned int qidx)
1944{
1945 if (ctrl->lport->ops->delete_queue)
1946 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1947 queue->lldd_handle);
1948 queue->lldd_handle = NULL;
1949}
1950
1951static void
1952nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1953{
1954 int i;
1955
1956 for (i = 1; i < ctrl->ctrl.queue_count; i++)
1957 nvme_fc_free_queue(&ctrl->queues[i]);
1958}
1959
1960static int
1961__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1962 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1963{
1964 int ret = 0;
1965
1966 queue->lldd_handle = NULL;
1967 if (ctrl->lport->ops->create_queue)
1968 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1969 qidx, qsize, &queue->lldd_handle);
1970
1971 return ret;
1972}
1973
1974static void
1975nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1976{
1977 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
1978 int i;
1979
1980 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
1981 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1982}
1983
1984static int
1985nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1986{
1987 struct nvme_fc_queue *queue = &ctrl->queues[1];
1988 int i, ret;
1989
1990 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
1991 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1992 if (ret)
1993 goto delete_queues;
1994 }
1995
1996 return 0;
1997
1998delete_queues:
1999 for (; i >= 0; i--)
2000 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
2001 return ret;
2002}
2003
2004static int
2005nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2006{
2007 int i, ret = 0;
2008
2009 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
2010 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
2011 (qsize / 5));
2012 if (ret)
2013 break;
2014 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
2015 if (ret)
2016 break;
2017
2018 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2019 }
2020
2021 return ret;
2022}
2023
2024static void
2025nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2026{
2027 int i;
2028
2029 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2030 nvme_fc_init_queue(ctrl, i);
2031}
2032
2033static void
2034nvme_fc_ctrl_free(struct kref *ref)
2035{
2036 struct nvme_fc_ctrl *ctrl =
2037 container_of(ref, struct nvme_fc_ctrl, ref);
2038 unsigned long flags;
2039
2040 if (ctrl->ctrl.tagset) {
2041 blk_cleanup_queue(ctrl->ctrl.connect_q);
2042 blk_mq_free_tag_set(&ctrl->tag_set);
2043 }
2044
2045
2046 spin_lock_irqsave(&ctrl->rport->lock, flags);
2047 list_del(&ctrl->ctrl_list);
2048 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2049
2050 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2051 blk_cleanup_queue(ctrl->ctrl.admin_q);
2052 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2053
2054 kfree(ctrl->queues);
2055
2056 put_device(ctrl->dev);
2057 nvme_fc_rport_put(ctrl->rport);
2058
2059 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2060 if (ctrl->ctrl.opts)
2061 nvmf_free_options(ctrl->ctrl.opts);
2062 kfree(ctrl);
2063}
2064
2065static void
2066nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2067{
2068 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2069}
2070
2071static int
2072nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2073{
2074 return kref_get_unless_zero(&ctrl->ref);
2075}
2076
2077
2078
2079
2080
2081static void
2082nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2083{
2084 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2085
2086 WARN_ON(nctrl != &ctrl->ctrl);
2087
2088 nvme_fc_ctrl_put(ctrl);
2089}
2090
2091static void
2092nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2093{
2094
2095 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2096 return;
2097
2098 dev_warn(ctrl->ctrl.device,
2099 "NVME-FC{%d}: transport association error detected: %s\n",
2100 ctrl->cnum, errmsg);
2101 dev_warn(ctrl->ctrl.device,
2102 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2103
2104 nvme_reset_ctrl(&ctrl->ctrl);
2105}
2106
2107static enum blk_eh_timer_return
2108nvme_fc_timeout(struct request *rq, bool reserved)
2109{
2110 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2111 struct nvme_fc_ctrl *ctrl = op->ctrl;
2112 int ret;
2113
2114 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2115 atomic_read(&op->state) == FCPOP_STATE_ABORTED)
2116 return BLK_EH_RESET_TIMER;
2117
2118 ret = __nvme_fc_abort_op(ctrl, op);
2119 if (ret)
2120
2121 return BLK_EH_NOT_HANDLED;
2122
2123
2124
2125
2126
2127
2128
2129
2130 nvme_fc_error_recovery(ctrl, "io timeout error");
2131
2132
2133
2134
2135
2136
2137 return BLK_EH_RESET_TIMER;
2138}
2139
2140static int
2141nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2142 struct nvme_fc_fcp_op *op)
2143{
2144 struct nvmefc_fcp_req *freq = &op->fcp_req;
2145 enum dma_data_direction dir;
2146 int ret;
2147
2148 freq->sg_cnt = 0;
2149
2150 if (!blk_rq_payload_bytes(rq))
2151 return 0;
2152
2153 freq->sg_table.sgl = freq->first_sgl;
2154 ret = sg_alloc_table_chained(&freq->sg_table,
2155 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
2156 if (ret)
2157 return -ENOMEM;
2158
2159 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2160 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2161 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
2162 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2163 op->nents, dir);
2164 if (unlikely(freq->sg_cnt <= 0)) {
2165 sg_free_table_chained(&freq->sg_table, true);
2166 freq->sg_cnt = 0;
2167 return -EFAULT;
2168 }
2169
2170
2171
2172
2173 return 0;
2174}
2175
2176static void
2177nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2178 struct nvme_fc_fcp_op *op)
2179{
2180 struct nvmefc_fcp_req *freq = &op->fcp_req;
2181
2182 if (!freq->sg_cnt)
2183 return;
2184
2185 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2186 ((rq_data_dir(rq) == WRITE) ?
2187 DMA_TO_DEVICE : DMA_FROM_DEVICE));
2188
2189 nvme_cleanup_cmd(rq);
2190
2191 sg_free_table_chained(&freq->sg_table, true);
2192
2193 freq->sg_cnt = 0;
2194}
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219static blk_status_t
2220nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2221 struct nvme_fc_fcp_op *op, u32 data_len,
2222 enum nvmefc_fcp_datadir io_dir)
2223{
2224 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2225 struct nvme_command *sqe = &cmdiu->sqe;
2226 u32 csn;
2227 int ret;
2228
2229
2230
2231
2232
2233 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2234 goto busy;
2235
2236 if (!nvme_fc_ctrl_get(ctrl))
2237 return BLK_STS_IOERR;
2238
2239
2240 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2241 csn = atomic_inc_return(&queue->csn);
2242 cmdiu->csn = cpu_to_be32(csn);
2243 cmdiu->data_len = cpu_to_be32(data_len);
2244 switch (io_dir) {
2245 case NVMEFC_FCP_WRITE:
2246 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2247 break;
2248 case NVMEFC_FCP_READ:
2249 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2250 break;
2251 case NVMEFC_FCP_NODATA:
2252 cmdiu->flags = 0;
2253 break;
2254 }
2255 op->fcp_req.payload_length = data_len;
2256 op->fcp_req.io_dir = io_dir;
2257 op->fcp_req.transferred_length = 0;
2258 op->fcp_req.rcv_rsplen = 0;
2259 op->fcp_req.status = NVME_SC_SUCCESS;
2260 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2261
2262
2263
2264
2265
2266 WARN_ON_ONCE(sqe->common.metadata);
2267 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2268
2269
2270
2271
2272
2273
2274
2275
2276 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2277 NVME_SGL_FMT_TRANSPORT_A;
2278 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2279 sqe->rw.dptr.sgl.addr = 0;
2280
2281 if (!(op->flags & FCOP_FLAGS_AEN)) {
2282 ret = nvme_fc_map_data(ctrl, op->rq, op);
2283 if (ret < 0) {
2284 nvme_cleanup_cmd(op->rq);
2285 nvme_fc_ctrl_put(ctrl);
2286 if (ret == -ENOMEM || ret == -EAGAIN)
2287 return BLK_STS_RESOURCE;
2288 return BLK_STS_IOERR;
2289 }
2290 }
2291
2292 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2293 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2294
2295 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2296
2297 if (!(op->flags & FCOP_FLAGS_AEN))
2298 blk_mq_start_request(op->rq);
2299
2300 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2301 &ctrl->rport->remoteport,
2302 queue->lldd_handle, &op->fcp_req);
2303
2304 if (ret) {
2305 if (!(op->flags & FCOP_FLAGS_AEN))
2306 nvme_fc_unmap_data(ctrl, op->rq, op);
2307
2308 nvme_fc_ctrl_put(ctrl);
2309
2310 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2311 ret != -EBUSY)
2312 return BLK_STS_IOERR;
2313
2314 goto busy;
2315 }
2316
2317 return BLK_STS_OK;
2318
2319busy:
2320 if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
2321 blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
2322
2323 return BLK_STS_RESOURCE;
2324}
2325
2326static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
2327 struct request *rq)
2328{
2329 if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
2330 return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
2331 return BLK_STS_OK;
2332}
2333
2334static blk_status_t
2335nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2336 const struct blk_mq_queue_data *bd)
2337{
2338 struct nvme_ns *ns = hctx->queue->queuedata;
2339 struct nvme_fc_queue *queue = hctx->driver_data;
2340 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2341 struct request *rq = bd->rq;
2342 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2343 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2344 struct nvme_command *sqe = &cmdiu->sqe;
2345 enum nvmefc_fcp_datadir io_dir;
2346 u32 data_len;
2347 blk_status_t ret;
2348
2349 ret = nvme_fc_is_ready(queue, rq);
2350 if (unlikely(ret))
2351 return ret;
2352
2353 ret = nvme_setup_cmd(ns, rq, sqe);
2354 if (ret)
2355 return ret;
2356
2357 data_len = blk_rq_payload_bytes(rq);
2358 if (data_len)
2359 io_dir = ((rq_data_dir(rq) == WRITE) ?
2360 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2361 else
2362 io_dir = NVMEFC_FCP_NODATA;
2363
2364 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2365}
2366
2367static struct blk_mq_tags *
2368nvme_fc_tagset(struct nvme_fc_queue *queue)
2369{
2370 if (queue->qnum == 0)
2371 return queue->ctrl->admin_tag_set.tags[queue->qnum];
2372
2373 return queue->ctrl->tag_set.tags[queue->qnum - 1];
2374}
2375
2376static int
2377nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
2378
2379{
2380 struct nvme_fc_queue *queue = hctx->driver_data;
2381 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2382 struct request *req;
2383 struct nvme_fc_fcp_op *op;
2384
2385 req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
2386 if (!req)
2387 return 0;
2388
2389 op = blk_mq_rq_to_pdu(req);
2390
2391 if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
2392 (ctrl->lport->ops->poll_queue))
2393 ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
2394 queue->lldd_handle);
2395
2396 return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
2397}
2398
2399static void
2400nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2401{
2402 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2403 struct nvme_fc_fcp_op *aen_op;
2404 unsigned long flags;
2405 bool terminating = false;
2406 blk_status_t ret;
2407
2408 spin_lock_irqsave(&ctrl->lock, flags);
2409 if (ctrl->flags & FCCTRL_TERMIO)
2410 terminating = true;
2411 spin_unlock_irqrestore(&ctrl->lock, flags);
2412
2413 if (terminating)
2414 return;
2415
2416 aen_op = &ctrl->aen_ops[0];
2417
2418 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2419 NVMEFC_FCP_NODATA);
2420 if (ret)
2421 dev_err(ctrl->ctrl.device,
2422 "failed async event work\n");
2423}
2424
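/*
 * Final per-request teardown once both the LLDD done path and the block
 * layer completion have run: reset the op state and flags, unmap the
 * data and complete the request back to the block layer.
 */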
2425static void
2426__nvme_fc_final_op_cleanup(struct request *rq)
2427{
2428 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2429 struct nvme_fc_ctrl *ctrl = op->ctrl;
2430
2431 atomic_set(&op->state, FCPOP_STATE_IDLE);
2432 op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
2433 FCOP_FLAGS_COMPLETE);
2434
2435 nvme_fc_unmap_data(ctrl, rq, op);
2436 nvme_complete_rq(rq);
2437 nvme_fc_ctrl_put(ctrl);
2439}
2440
2441static void
2442nvme_fc_complete_rq(struct request *rq)
2443{
2444 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2445 struct nvme_fc_ctrl *ctrl = op->ctrl;
2446 unsigned long flags;
2447 bool completed = false;

        /*
         * The LLDD's fcpio done path and this block layer completion can
         * race during controller teardown.  If the done path already
         * marked the op FCOP_FLAGS_COMPLETE, finish the cleanup here;
         * otherwise mark it FCOP_FLAGS_RELEASED so the done path performs
         * the final cleanup when it runs.
         */
2457 spin_lock_irqsave(&ctrl->lock, flags);
2458 if (op->flags & FCOP_FLAGS_COMPLETE)
2459 completed = true;
2460 else
2461 op->flags |= FCOP_FLAGS_RELEASED;
2462 spin_unlock_irqrestore(&ctrl->lock, flags);
2463
2464 if (completed)
2465 __nvme_fc_final_op_cleanup(rq);
2466}
2467
2468
/*
 * Called via blk_mq_tagset_busy_iter() when an association is being torn
 * down: for every started request, ask the LLDD to abort the FC exchange
 * backing it.  The LLDD's normal done path then completes the request
 * with an aborted status.  While FCCTRL_TERMIO is set, ctrl->iocnt counts
 * the aborts issued so teardown can wait for them to finish.
 */
2481static void
2482nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2483{
2484 struct nvme_ctrl *nctrl = data;
2485 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2486 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2487 unsigned long flags;
2488 int status;
2489
2490 if (!blk_mq_request_started(req))
2491 return;
2492
2493 spin_lock_irqsave(&ctrl->lock, flags);
2494 if (ctrl->flags & FCCTRL_TERMIO) {
2495 ctrl->iocnt++;
2496 op->flags |= FCOP_FLAGS_TERMIO;
2497 }
2498 spin_unlock_irqrestore(&ctrl->lock, flags);
2499
2500 status = __nvme_fc_abort_op(ctrl, op);
2501 if (status) {
                /*
                 * __nvme_fc_abort_op() failing means the io was not
                 * active with the LLDD, so there is nothing to wait for:
                 * back out the flags/counters set above.
                 */
2509 spin_lock_irqsave(&ctrl->lock, flags);
2510 if (ctrl->flags & FCCTRL_TERMIO)
2511 ctrl->iocnt--;
2512 op->flags &= ~FCOP_FLAGS_TERMIO;
2513 spin_unlock_irqrestore(&ctrl->lock, flags);
2514 return;
2515 }
2516}
2517
2518
2519static const struct blk_mq_ops nvme_fc_mq_ops = {
2520 .queue_rq = nvme_fc_queue_rq,
2521 .complete = nvme_fc_complete_rq,
2522 .init_request = nvme_fc_init_request,
2523 .exit_request = nvme_fc_exit_request,
2524 .init_hctx = nvme_fc_init_hctx,
2525 .poll = nvme_fc_poll,
2526 .timeout = nvme_fc_timeout,
2527};
2528
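/*
 * First-time io queue setup: negotiate the io queue count with the
 * controller, allocate the shared blk-mq tag set and connect_q, then
 * create the hardware queues in the LLDD and issue the fabric connects.
 */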
2529static int
2530nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2531{
2532 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2533 unsigned int nr_io_queues;
2534 int ret;
2535
2536 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2537 ctrl->lport->ops->max_hw_queues);
2538 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2539 if (ret) {
2540 dev_info(ctrl->ctrl.device,
2541 "set_queue_count failed: %d\n", ret);
2542 return ret;
2543 }
2544
2545 ctrl->ctrl.queue_count = nr_io_queues + 1;
2546 if (!nr_io_queues)
2547 return 0;
2548
2549 nvme_fc_init_io_queues(ctrl);
2550
2551 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2552 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2553 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2554 ctrl->tag_set.reserved_tags = 1;
2555 ctrl->tag_set.numa_node = NUMA_NO_NODE;
2556 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2557 ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2558 (SG_CHUNK_SIZE *
2559 sizeof(struct scatterlist)) +
2560 ctrl->lport->ops->fcprqst_priv_sz;
2561 ctrl->tag_set.driver_data = ctrl;
2562 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2563 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2564
2565 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2566 if (ret)
2567 return ret;
2568
2569 ctrl->ctrl.tagset = &ctrl->tag_set;
2570
2571 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2572 if (IS_ERR(ctrl->ctrl.connect_q)) {
2573 ret = PTR_ERR(ctrl->ctrl.connect_q);
2574 goto out_free_tag_set;
2575 }
2576
2577 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2578 if (ret)
2579 goto out_cleanup_blk_queue;
2580
2581 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2582 if (ret)
2583 goto out_delete_hw_queues;
2584
2585 return 0;
2586
2587out_delete_hw_queues:
2588 nvme_fc_delete_hw_io_queues(ctrl);
2589out_cleanup_blk_queue:
2590 blk_cleanup_queue(ctrl->ctrl.connect_q);
2591out_free_tag_set:
2592 blk_mq_free_tag_set(&ctrl->tag_set);
2593 nvme_fc_free_io_queues(ctrl);

        /* make the later teardown paths skip the io queues */
2596 ctrl->ctrl.tagset = NULL;
2597
2598 return ret;
2599}
2600
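/*
 * Re-establish the io queues after a reset/reconnect: the tag set already
 * exists, so only the queue count is renegotiated and the hardware queues
 * are recreated and reconnected.
 */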
2601static int
2602nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
2603{
2604 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2605 unsigned int nr_io_queues;
2606 int ret;
2607
2608 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2609 ctrl->lport->ops->max_hw_queues);
2610 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2611 if (ret) {
2612 dev_info(ctrl->ctrl.device,
2613 "set_queue_count failed: %d\n", ret);
2614 return ret;
2615 }
2616
2617 ctrl->ctrl.queue_count = nr_io_queues + 1;
2618
2619 if (ctrl->ctrl.queue_count == 1)
2620 return 0;
2621
2622 nvme_fc_init_io_queues(ctrl);
2623
2624 ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
2625 if (ret)
2626 goto out_free_io_queues;
2627
2628 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2629 if (ret)
2630 goto out_free_io_queues;
2631
2632 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2633 if (ret)
2634 goto out_delete_hw_queues;
2635
2636 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2637
2638 return 0;
2639
2640out_delete_hw_queues:
2641 nvme_fc_delete_hw_io_queues(ctrl);
2642out_free_io_queues:
2643 nvme_fc_free_io_queues(ctrl);
2644 return ret;
2645}
2646
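/*
 * The following helpers maintain per-lport and per-rport active counts so
 * that the LLDD's localport_delete()/remoteport_delete() callbacks are
 * only invoked when the last active user of a port that is already in the
 * DELETED state goes away.
 */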
2647static void
2648nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2649{
2650 struct nvme_fc_lport *lport = rport->lport;
2651
2652 atomic_inc(&lport->act_rport_cnt);
2653}
2654
2655static void
2656nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2657{
2658 struct nvme_fc_lport *lport = rport->lport;
2659 u32 cnt;
2660
2661 cnt = atomic_dec_return(&lport->act_rport_cnt);
2662 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2663 lport->ops->localport_delete(&lport->localport);
2664}
2665
2666static int
2667nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
2668{
2669 struct nvme_fc_rport *rport = ctrl->rport;
2670 u32 cnt;
2671
2672 if (ctrl->assoc_active)
2673 return 1;
2674
2675 ctrl->assoc_active = true;
2676 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
2677 if (cnt == 1)
2678 nvme_fc_rport_active_on_lport(rport);
2679
2680 return 0;
2681}
2682
2683static int
2684nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
2685{
2686 struct nvme_fc_rport *rport = ctrl->rport;
2687 struct nvme_fc_lport *lport = rport->lport;
2688 u32 cnt;
2689
        /* ctrl->assoc_active is cleared by the callers of this routine */
2692 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
2693 if (cnt == 0) {
2694 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
2695 lport->ops->remoteport_delete(&rport->remoteport);
2696 nvme_fc_rport_inactive_on_lport(rport);
2697 }
2698
2699 return 0;
2700}
2701
/*
 * Create (or re-create) the association with the target: set up and
 * connect the admin queue, initialize the controller, then create and
 * connect the io queues.
 */
2706static int
2707nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2708{
2709 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2710 int ret;
2711 bool changed;
2712
2713 ++ctrl->ctrl.nr_reconnects;
2714
2715 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2716 return -ENODEV;
2717
2718 if (nvme_fc_ctlr_active_on_rport(ctrl))
2719 return -ENOTUNIQ;
2720
        /*
         * create the admin queue
         */
2725 nvme_fc_init_queue(ctrl, 0);
2726
2727 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2728 NVME_AQ_BLK_MQ_DEPTH);
2729 if (ret)
2730 goto out_free_queue;
2731
2732 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2733 NVME_AQ_BLK_MQ_DEPTH,
2734 (NVME_AQ_BLK_MQ_DEPTH / 4));
2735 if (ret)
2736 goto out_delete_hw_queue;
2737
2738 if (ctrl->ctrl.state != NVME_CTRL_NEW)
2739 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2740
2741 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2742 if (ret)
2743 goto out_disconnect_admin_queue;
2744
2745 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

        /*
         * check controller capabilities
         */
2754 ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
2755 if (ret) {
2756 dev_err(ctrl->ctrl.device,
2757 "prop_get NVME_REG_CAP failed\n");
2758 goto out_disconnect_admin_queue;
2759 }
2760
2761 ctrl->ctrl.sqsize =
2762 min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
2763
2764 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
2765 if (ret)
2766 goto out_disconnect_admin_queue;
2767
2768 ctrl->ctrl.max_hw_sectors =
2769 (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
2770
2771 ret = nvme_init_identify(&ctrl->ctrl);
2772 if (ret)
2773 goto out_disconnect_admin_queue;
2774
        /*
         * sanity check: the FC transport does not support a non-zero
         * in-capsule data offset (icdoff)
         */
2778 if (ctrl->ctrl.icdoff) {
2779 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2780 ctrl->ctrl.icdoff);
2781 goto out_disconnect_admin_queue;
2782 }
2783
2784
2785
2786 if (opts->queue_size > ctrl->ctrl.maxcmd) {
                /* warn and clamp: ctrl maxcmd limits the usable queue depth */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl maxcmd %u, reducing "
                        "queue_size to maxcmd\n",
                        opts->queue_size, ctrl->ctrl.maxcmd);
2792 opts->queue_size = ctrl->ctrl.maxcmd;
2793 }
2794
2795 ret = nvme_fc_init_aen_ops(ctrl);
2796 if (ret)
2797 goto out_term_aen_ops;

        /*
         * create the io queues
         */
2803 if (ctrl->ctrl.queue_count > 1) {
2804 if (ctrl->ctrl.state == NVME_CTRL_NEW)
2805 ret = nvme_fc_create_io_queues(ctrl);
2806 else
2807 ret = nvme_fc_reinit_io_queues(ctrl);
2808 if (ret)
2809 goto out_term_aen_ops;
2810 }
2811
2812 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2813
2814 ctrl->ctrl.nr_reconnects = 0;
2815
2816 if (changed)
2817 nvme_start_ctrl(&ctrl->ctrl);
2818
2819 return 0;
2820
2821out_term_aen_ops:
2822 nvme_fc_term_aen_ops(ctrl);
2823out_disconnect_admin_queue:
        /* send a Disconnect(association) LS to the fc-nvme target */
2825 nvme_fc_xmt_disconnect_assoc(ctrl);
2826out_delete_hw_queue:
2827 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2828out_free_queue:
2829 nvme_fc_free_queue(&ctrl->queues[0]);
2830 ctrl->assoc_active = false;
2831 nvme_fc_ctlr_inactive_on_rport(ctrl);
2832
2833 return ret;
2834}
2835
/*
 * This routine stops operation of the controller: the admin and io queues
 * are quiesced, outstanding ios are terminated through the LLDD, and the
 * association with the target is torn down.
 */
2842static void
2843nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2844{
2845 unsigned long flags;
2846
2847 if (!ctrl->assoc_active)
2848 return;
2849 ctrl->assoc_active = false;
2850
2851 spin_lock_irqsave(&ctrl->lock, flags);
2852 ctrl->flags |= FCCTRL_TERMIO;
2853 ctrl->iocnt = 0;
2854 spin_unlock_irqrestore(&ctrl->lock, flags);
2855
        /*
         * If io queues are present, stop them and terminate all
         * outstanding ios on them.  As each io has an FC exchange
         * allocated for it, the LLDD must be asked to abort the exchange;
         * its done path then returns the request to the block layer with
         * an aborted status.
         */
2868 if (ctrl->ctrl.queue_count > 1) {
2869 nvme_stop_queues(&ctrl->ctrl);
2870 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2871 nvme_fc_terminate_exchange, &ctrl->ctrl);
2872 }
2873
        /*
         * Don't attempt a register-based graceful shutdown here: a batch
         * of io may just have been aborted and the controller may not be
         * reachable on the link, so issuing more commands would only
         * create more exchanges to clean up.  The Disconnect LS sent
         * below is left as the last traffic on the association.
         *
         * The admin queue is cleaned up the same way as the io queues:
         * quiesce it and terminate any outstanding exchanges through the
         * LLDD.
         */
2890
2891 if (ctrl->ctrl.state != NVME_CTRL_NEW)
2892 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
2893 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2894 nvme_fc_terminate_exchange, &ctrl->ctrl);
2895
        /* kill the aens as they are a separate path */
2897 nvme_fc_abort_aen_ops(ctrl);

        /* wait for all io that had to be aborted */
2900 spin_lock_irq(&ctrl->lock);
2901 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
2902 ctrl->flags &= ~FCCTRL_TERMIO;
2903 spin_unlock_irq(&ctrl->lock);
2904
2905 nvme_fc_term_aen_ops(ctrl);
2906
        /*
         * Send a Disconnect(association) LS to the fc-nvme target.  It is
         * sent after the aborts so it is the last traffic on the
         * association; if no association exists, association_id is 0 and
         * the LS is skipped.
         */
2913 if (ctrl->association_id)
2914 nvme_fc_xmt_disconnect_assoc(ctrl);
2915
2916 if (ctrl->ctrl.tagset) {
2917 nvme_fc_delete_hw_io_queues(ctrl);
2918 nvme_fc_free_io_queues(ctrl);
2919 }
2920
2921 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2922 nvme_fc_free_queue(&ctrl->queues[0]);
2923
2924 nvme_fc_ctlr_inactive_on_rport(ctrl);
2925}
2926
2927static void
2928nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
2929{
2930 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2931
2932 cancel_delayed_work_sync(&ctrl->connect_work);
        /*
         * Kill the association on the link side.  This will block while
         * waiting for io to terminate.
         */
2937 nvme_fc_delete_association(ctrl);
2938}
2939
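/*
 * Called when an association create or reconnect attempt fails: either
 * schedule another connect attempt (bounded by the reconnect policy and
 * the remote port's dev_loss window) or remove the controller.
 */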
2940static void
2941nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2942{
2943 struct nvme_fc_rport *rport = ctrl->rport;
2944 struct nvme_fc_remote_port *portptr = &rport->remoteport;
2945 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
2946 bool recon = true;
2947
2948 if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
2949 return;
2950
2951 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2952 dev_info(ctrl->ctrl.device,
2953 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2954 ctrl->cnum, status);
2955 else if (time_after_eq(jiffies, rport->dev_loss_end))
2956 recon = false;
2957
2958 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
2959 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2960 dev_info(ctrl->ctrl.device,
2961 "NVME-FC{%d}: Reconnect attempt in %ld "
2962 "seconds\n",
2963 ctrl->cnum, recon_delay / HZ);
2964 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
2965 recon_delay = rport->dev_loss_end - jiffies;
2966
2967 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
2968 } else {
2969 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2970 dev_warn(ctrl->ctrl.device,
2971 "NVME-FC{%d}: Max reconnect attempts (%d) "
2972 "reached. Removing controller\n",
2973 ctrl->cnum, ctrl->ctrl.nr_reconnects);
2974 else
2975 dev_warn(ctrl->ctrl.device,
2976 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
2977 "while waiting for remoteport connectivity. "
2978 "Removing controller\n", ctrl->cnum,
2979 portptr->dev_loss_tmo);
2980 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
2981 }
2982}
2983
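/*
 * Controller reset work: stop the controller, tear down the current
 * association, then attempt to create a new one if the remote port is
 * still online.
 */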
2984static void
2985nvme_fc_reset_ctrl_work(struct work_struct *work)
2986{
2987 struct nvme_fc_ctrl *ctrl =
2988 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2989 int ret;
2990
2991 nvme_stop_ctrl(&ctrl->ctrl);

        /* will block while waiting for io to terminate */
2994 nvme_fc_delete_association(ctrl);
2995
2996 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
2997 dev_err(ctrl->ctrl.device,
2998 "NVME-FC{%d}: error_recovery: Couldn't change state "
2999 "to RECONNECTING\n", ctrl->cnum);
3000 return;
3001 }
3002
3003 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
3004 ret = nvme_fc_create_association(ctrl);
3005 else
3006 ret = -ENOTCONN;
3007
3008 if (ret)
3009 nvme_fc_reconnect_or_delete(ctrl, ret);
3010 else
3011 dev_info(ctrl->ctrl.device,
3012 "NVME-FC{%d}: controller reset complete\n",
3013 ctrl->cnum);
3014}
3015
3016static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
3017 .name = "fc",
3018 .module = THIS_MODULE,
3019 .flags = NVME_F_FABRICS,
3020 .reg_read32 = nvmf_reg_read32,
3021 .reg_read64 = nvmf_reg_read64,
3022 .reg_write32 = nvmf_reg_write32,
3023 .free_ctrl = nvme_fc_nvme_ctrl_freed,
3024 .submit_async_event = nvme_fc_submit_async_event,
3025 .delete_ctrl = nvme_fc_delete_ctrl,
3026 .get_address = nvmf_get_address,
3027 .reinit_request = nvme_fc_reinit_request,
3028};
3029
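/*
 * Delayed reconnect work: retry creating the association and either
 * declare the reconnect complete or fall back to
 * nvme_fc_reconnect_or_delete().
 */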
3030static void
3031nvme_fc_connect_ctrl_work(struct work_struct *work)
3032{
3033 int ret;
3034
3035 struct nvme_fc_ctrl *ctrl =
3036 container_of(to_delayed_work(work),
3037 struct nvme_fc_ctrl, connect_work);
3038
3039 ret = nvme_fc_create_association(ctrl);
3040 if (ret)
3041 nvme_fc_reconnect_or_delete(ctrl, ret);
3042 else
3043 dev_info(ctrl->ctrl.device,
3044 "NVME-FC{%d}: controller reconnect complete\n",
3045 ctrl->cnum);
3046}
3047
3048
3049static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3050 .queue_rq = nvme_fc_queue_rq,
3051 .complete = nvme_fc_complete_rq,
3052 .init_request = nvme_fc_init_request,
3053 .exit_request = nvme_fc_exit_request,
3054 .init_hctx = nvme_fc_init_admin_hctx,
3055 .timeout = nvme_fc_timeout,
3056};
3057
3058
/*
 * Returns true if an existing controller (association) on this remote port
 * already matches the base options of the new connect request, allowing
 * duplicate connects to be rejected.
 */
3067static bool
3068nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3069 struct nvmf_ctrl_options *opts)
3070{
3071 struct nvme_fc_ctrl *ctrl;
3072 unsigned long flags;
3073 bool found = false;
3074
3075 spin_lock_irqsave(&rport->lock, flags);
3076 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3077 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3078 if (found)
3079 break;
3080 }
3081 spin_unlock_irqrestore(&rport->lock, flags);
3082
3083 return found;
3084}
3085
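/*
 * Allocate and initialize a new FC controller: set up the admin tag set
 * and admin request queue, register with the nvme core, link into the
 * rport's controller list and attempt the initial association (with a
 * few retries).
 */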
3086static struct nvme_ctrl *
3087nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3088 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3089{
3090 struct nvme_fc_ctrl *ctrl;
3091 unsigned long flags;
3092 int ret, idx, retry;
3093
3094 if (!(rport->remoteport.port_role &
3095 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3096 ret = -EBADR;
3097 goto out_fail;
3098 }
3099
3100 if (!opts->duplicate_connect &&
3101 nvme_fc_existing_controller(rport, opts)) {
3102 ret = -EALREADY;
3103 goto out_fail;
3104 }
3105
3106 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3107 if (!ctrl) {
3108 ret = -ENOMEM;
3109 goto out_fail;
3110 }
3111
3112 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
3113 if (idx < 0) {
3114 ret = -ENOSPC;
3115 goto out_free_ctrl;
3116 }
3117
3118 ctrl->ctrl.opts = opts;
3119 INIT_LIST_HEAD(&ctrl->ctrl_list);
3120 ctrl->lport = lport;
3121 ctrl->rport = rport;
3122 ctrl->dev = lport->dev;
3123 ctrl->cnum = idx;
3124 ctrl->assoc_active = false;
3125 init_waitqueue_head(&ctrl->ioabort_wait);
3126
3127 get_device(ctrl->dev);
3128 kref_init(&ctrl->ref);
3129
3130 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3131 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3132 spin_lock_init(&ctrl->lock);
3133
3134
3135 ctrl->ctrl.queue_count = min_t(unsigned int,
3136 opts->nr_io_queues,
3137 lport->ops->max_hw_queues);
        ctrl->ctrl.queue_count++;       /* +1 for the admin queue */
3139
3140 ctrl->ctrl.sqsize = opts->queue_size - 1;
3141 ctrl->ctrl.kato = opts->kato;
3142
3143 ret = -ENOMEM;
3144 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3145 sizeof(struct nvme_fc_queue), GFP_KERNEL);
3146 if (!ctrl->queues)
3147 goto out_free_ida;
3148
3149 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3150 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3151 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3152 ctrl->admin_tag_set.reserved_tags = 2;
3153 ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
3154 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
3155 (SG_CHUNK_SIZE *
3156 sizeof(struct scatterlist)) +
3157 ctrl->lport->ops->fcprqst_priv_sz;
3158 ctrl->admin_tag_set.driver_data = ctrl;
3159 ctrl->admin_tag_set.nr_hw_queues = 1;
3160 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
3161 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3162
3163 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3164 if (ret)
3165 goto out_free_queues;
3166 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3167
3168 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3169 if (IS_ERR(ctrl->ctrl.admin_q)) {
3170 ret = PTR_ERR(ctrl->ctrl.admin_q);
3171 goto out_free_admin_tag_set;
3172 }
3173
        /*
         * The io queue tag set is not allocated here: the controller must
         * first report how many io queues it supports, so io tag set
         * setup is deferred to the connect path
         * (nvme_fc_create_io_queues()).
         */
3181 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3182 if (ret)
3183 goto out_cleanup_admin_q;

        /* from this point on, teardown follows the nvme ctrl ref counting */

3187 spin_lock_irqsave(&rport->lock, flags);
3188 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3189 spin_unlock_irqrestore(&rport->lock, flags);
3190
        /*
         * The transactions used to create the association (the Create
         * Association LS, Create IO Connection LS's and fabric connect
         * commands) can fail transiently, so the initial attempt is
         * retried a few times before the controller create is failed.
         */
3209 for (retry = 0; retry < 3; retry++) {
3210 ret = nvme_fc_create_association(ctrl);
3211 if (!ret)
3212 break;
3213 }
3214
3215 if (ret) {
                /* retries exhausted - fail the controller create */
3217 dev_err(ctrl->ctrl.device,
3218 "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
3219
3220 ctrl->ctrl.opts = NULL;

                /* initial connect failed: undo the init and drop the core ref */
3223 nvme_uninit_ctrl(&ctrl->ctrl);
3224
3225
3226 nvme_put_ctrl(&ctrl->ctrl);
3227
                /*
                 * We are past the point where teardown transitioned to
                 * the ref counting path, which also does an rport put.
                 * Since the caller, seeing an error return, will also do
                 * an rport put, take an extra rport reference here so the
                 * gets and puts stay balanced.
                 */
3235 nvme_fc_rport_get(rport);
3236
3237 if (ret > 0)
3238 ret = -EIO;
3239 return ERR_PTR(ret);
3240 }
3241
3242 nvme_get_ctrl(&ctrl->ctrl);
3243
3244 dev_info(ctrl->ctrl.device,
3245 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3246 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3247
3248 return &ctrl->ctrl;
3249
3250out_cleanup_admin_q:
3251 blk_cleanup_queue(ctrl->ctrl.admin_q);
3252out_free_admin_tag_set:
3253 blk_mq_free_tag_set(&ctrl->admin_tag_set);
3254out_free_queues:
3255 kfree(ctrl->queues);
3256out_free_ida:
3257 put_device(ctrl->dev);
3258 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3259out_free_ctrl:
3260 kfree(ctrl);
3261out_fail:
3262
3263 return ERR_PTR(ret);
3264}
3265
3266
3267struct nvmet_fc_traddr {
3268 u64 nn;
3269 u64 pn;
3270};
3271
3272static int
3273__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3274{
3275 u64 token64;
3276
3277 if (match_u64(sstr, &token64))
3278 return -EINVAL;
3279 *val = token64;
3280
3281 return 0;
3282}
3283
/*
 * Parse an FC transport address of the form
 * "nn-0x<16 hexdigits>:pn-0x<16 hexdigits>" (the "0x" prefixes may be
 * omitted) into 64-bit node name and port name values.
 */
3289static int
3290nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3291{
3292 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3293 substring_t wwn = { name, &name[sizeof(name)-1] };
3294 int nnoffset, pnoffset;
3295
        /* validate if string is one of the 2 allowed formats */
3297 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3298 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3299 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3300 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3301 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3302 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3303 NVME_FC_TRADDR_OXNNLEN;
3304 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3305 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3306 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3307 "pn-", NVME_FC_TRADDR_NNLEN))) {
3308 nnoffset = NVME_FC_TRADDR_NNLEN;
3309 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3310 } else
3311 goto out_einval;
3312
3313 name[0] = '0';
3314 name[1] = 'x';
3315 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3316
3317 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3318 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3319 goto out_einval;
3320
3321 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3322 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3323 goto out_einval;
3324
3325 return 0;
3326
3327out_einval:
3328 pr_warn("%s: bad traddr string\n", __func__);
3329 return -EINVAL;
3330}
3331
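/*
 * Fabrics "create controller" entry point for the FC transport: parse the
 * remote and local transport addresses, find the matching registered
 * lport/rport pair, and instantiate the controller on it.
 */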
3332static struct nvme_ctrl *
3333nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3334{
3335 struct nvme_fc_lport *lport;
3336 struct nvme_fc_rport *rport;
3337 struct nvme_ctrl *ctrl;
3338 struct nvmet_fc_traddr laddr = { 0L, 0L };
3339 struct nvmet_fc_traddr raddr = { 0L, 0L };
3340 unsigned long flags;
3341 int ret;
3342
3343 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3344 if (ret || !raddr.nn || !raddr.pn)
3345 return ERR_PTR(-EINVAL);
3346
3347 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3348 if (ret || !laddr.nn || !laddr.pn)
3349 return ERR_PTR(-EINVAL);
3350
        /* find the host (local) and remote ports named in the request */
3352 spin_lock_irqsave(&nvme_fc_lock, flags);
3353 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3354 if (lport->localport.node_name != laddr.nn ||
3355 lport->localport.port_name != laddr.pn)
3356 continue;
3357
3358 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3359 if (rport->remoteport.node_name != raddr.nn ||
3360 rport->remoteport.port_name != raddr.pn)
3361 continue;
3362
                        /* if we can't take a reference, fall through; will error */
3364 if (!nvme_fc_rport_get(rport))
3365 break;
3366
3367 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3368
3369 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3370 if (IS_ERR(ctrl))
3371 nvme_fc_rport_put(rport);
3372 return ctrl;
3373 }
3374 }
3375 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3376
3377 return ERR_PTR(-ENOENT);
3378}
3379
3380
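/*
 * FC requires both traddr (remote port) and host_traddr (local port) on a
 * connect request; reconnect delay and controller loss timeout are
 * optional.
 */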
3381static struct nvmf_transport_ops nvme_fc_transport = {
3382 .name = "fc",
3383 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3384 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3385 .create_ctrl = nvme_fc_create_ctrl,
3386};
3387
3388static int __init nvme_fc_init_module(void)
3389{
3390 int ret;
3391
        /*
         * Create the "fc" device class and, below, the fc_udev_device
         * that FC-specific udev events (such as nvme discovery events)
         * are posted against.
         */
3406 fc_class = class_create(THIS_MODULE, "fc");
3407 if (IS_ERR(fc_class)) {
3408 pr_err("couldn't register class fc\n");
3409 return PTR_ERR(fc_class);
3410 }
3411
        /*
         * create a device for the FC-centric udev events
         */
3415 fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
3416 "fc_udev_device");
3417 if (IS_ERR(fc_udev_device)) {
3418 pr_err("couldn't create fc_udev device!\n");
3419 ret = PTR_ERR(fc_udev_device);
3420 goto out_destroy_class;
3421 }
3422
3423 ret = nvmf_register_transport(&nvme_fc_transport);
3424 if (ret)
3425 goto out_destroy_device;
3426
3427 return 0;
3428
3429out_destroy_device:
3430 device_destroy(fc_class, MKDEV(0, 0));
3431out_destroy_class:
3432 class_destroy(fc_class);
3433 return ret;
3434}
3435
3436static void __exit nvme_fc_exit_module(void)
3437{
        /* sanity check - all lports should have been removed */
3439 if (!list_empty(&nvme_fc_lport_list))
3440 pr_warn("%s: localport list not empty\n", __func__);
3441
3442 nvmf_unregister_transport(&nvme_fc_transport);
3443
3444 ida_destroy(&nvme_fc_local_port_cnt);
3445 ida_destroy(&nvme_fc_ctrl_cnt);
3446
3447 device_destroy(fc_class, MKDEV(0, 0));
3448 class_destroy(fc_class);
3449}
3450
3451module_init(nvme_fc_init_module);
3452module_exit(nvme_fc_exit_module);
3453
3454MODULE_LICENSE("GPL v2");
3455