/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60

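/* per-hw-queue transport context; ctrl->queues[0] is the admin queue */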
struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;
	bool			req_queued;
};

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

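/* state for each registered local (host-side) FC port */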
struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;
	struct list_head		endp_list;
	struct device			*dev;
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));

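/* state for each registered remote (target-side) FC port */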
struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct device			*dev;
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
} __aligned(sizeof(u64));

enum nvme_fcctrl_flags {
	FCCTRL_TERMIO		= (1 << 0),
};

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			assoc_active;
	u64			association_id;

	struct list_head	ctrl_list;

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct delayed_work	connect_work;

	struct kref		ref;
	u32			flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}

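/* driver-wide state; nvme_fc_lock protects the lport/rport lists */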
static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

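/*
 * pseudo device and class used only to emit the FC discovery uevents
 * generated by nvme_fc_signal_discovery_scan().
 */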
static struct class *fc_class;
static struct device *fc_udev_device;

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}

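/*
 * Look for a previously registered, now-deleted lport with matching
 * WWNN/WWPN.  If one is found (and still bound to the same device),
 * take a reference and bring it back online instead of allocating a
 * new structure.  Returns NULL if no match, or an ERR_PTR on conflict.
 */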
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed.  If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered a NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

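/*
 * FC-NVME transport addresses are fixed-format:
 *   "nn-0x<16hexdigits>:pn-0x<16hexdigits>"
 * The buffers below also leave room for the uevent variable prefixes.
 */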
#define FCNVME_TRADDR_LENGTH		64

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];
	char tgtaddr[FCNVME_TRADDR_LENGTH];
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

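/* remote port connectivity has returned; restart reconnects as needed */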
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}

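/*
 * Look for a deleted rport with matching WWNs that is still within its
 * dev_loss_tmo window.  If found, take a reference, bring it back
 * online, and restart any controllers awaiting reconnect.
 */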
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

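/*
 * Abort every outstanding LS request on the rport.  The rport lock is
 * dropped around each ls_abort() call, so the list scan restarts from
 * the head after every abort.
 */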
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect\n", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate
		 * the association and schedule the reconnect timer.
		 * Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires
		 * or the remoteport's dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do
		 * anything further. Reconnects will be attempted until
		 * either the ctlr_loss_tmo or the remoteport's
		 * dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating
		 * the association. No need to do anything further. The
		 * reconnect step will kick in naturally after the
		 * association is terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered a NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);

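/*
 * DMA wrappers: an LLDD (e.g. a software/loopback transport) may
 * register with a NULL struct device, in which case mapping is skipped
 * and buffers are referenced by their virtual addresses.
 */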
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}


static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

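/* dequeue an LS op, unmap its DMA buffers, and drop the rport reference */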
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * wait until driver calls back. lldd responsible for
		 * the timeout action
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

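/* Validation Error indexes into the string table below */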
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

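/*
 * Send an async Disconnect Association LS for the current association.
 * The LS op (including its request/response buffers) is freed by the
 * completion callback; if the send itself fails it is freed here, and
 * teardown of the association continues regardless.
 */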
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... too bad */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
						FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
}


/* *********************** NVME Ctrl Routines **************************** */

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static int
nvme_fc_reinit_request(void *data, struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;

	memset(cmdiu, 0, sizeof(*cmdiu));
	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));

	return 0;
}

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}

static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (ctrl->flags & FCCTRL_TERMIO)
		ctrl->iocnt++;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}

static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;
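	/*
	 * Completion handling: aborted ops and transport errors fail the
	 * request outright.  Otherwise the response is validated: a zero
	 * length or all-zeros ERSP is a success (the transport fabricates
	 * the CQE from the transferred length), while a full ERSP must be
	 * internally consistent before its CQE is used.  Anything else is
	 * treated as a transport error and terminates the association.
	 */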
	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can be called with the non-zero
	 * status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */
	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	/*
	 * Force failures of commands if we're killing the controller
	 * or have an error on a command used to create an new association
	 */
	if (status &&
	    (blk_queue_dying(rq->q) ||
	     ctrl->ctrl.state == NVME_CTRL_NEW ||
	     ctrl->ctrl.state == NVME_CTRL_CONNECTING))
		status |= cpu_to_le16(NVME_SC_DNR << 1);

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	nvme_end_request(rq, status, result);

check_error:
	if (terminate_assoc)
		nvme_fc_error_recovery(ctrl, "transport detected io error");
}

static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}

static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.first_sgl = NULL; /* no sg list */
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}

static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}

static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 1);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);
}

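/*
 * Transport-level queue teardown.  All outstanding ios on the queue
 * have already been terminated by the time this is called.
 */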
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);

	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is
	 * never a disconnect(queue) LS sent to the target.
	 */
	queue->connection_id = 0;
}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i >= 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}

static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;

		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i);
}

static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	/* remove from rport list */
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}

/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	/* only proceed if in LIVE state - e.g. on first error */
	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association error detected: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}

static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	/*
	 * we can't individually ABTS an io without affecting the queue,
	 * thus killing the queue, and thus the association.
	 * So resolve by performing a controller reset, which will stop
	 * the host/io stack, terminate the association on the link,
	 * and recreate an association on the link.
	 */
	nvme_fc_error_recovery(ctrl, "io timeout error");

	/*
	 * the io abort has been initiated. Have the reset timer
	 * restarted and the abort completion will complete the io
	 * shortly. Hopefully, if the io completes prior to the reset,
	 * the io will be returned and the timer cancelled.
	 */
	return BLK_EH_RESET_TIMER;
}

static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_payload_bytes(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, true);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq)  for DIF
	 */
	return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
				((rq_data_dir(rq) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, true);

	freq->sg_cnt = 0;
}
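/*
 * Build the FC command IU and SQE for an FCP op and hand it to the
 * LLDD.  Transient conditions (remote port not online, allocation
 * failure, LLDD busy) return BLK_STS_RESOURCE so blk-mq will requeue;
 * hard failures return BLK_STS_IOERR.
 */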
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir	io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	u32 csn;
	int ret, opstate;

	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return BLK_STS_RESOURCE;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	csn = atomic_inc_return(&queue->csn);
	cmdiu->csn = cpu_to_be32(csn);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-Specific value
	 *    address=0
	 *    length=length of the data series
	 */
	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				  sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);

		if (!(op->flags & FCOP_FLAGS_AEN))
			nvme_fc_unmap_data(ctrl, op->rq, op);

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
				ret != -EBUSY)
			return BLK_STS_IOERR;

		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir	io_dir;
	u32 data_len;
	blk_status_t ret;

	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
		test_bit(NVME_FC_Q_LIVE, &queue->flags),
		ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE);
	if (unlikely(ret))
		return ret;

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	data_len = blk_rq_payload_bytes(rq);
	if (data_len)
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	else
		io_dir = NVMEFC_FCP_NODATA;

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}

static struct blk_mq_tags *
nvme_fc_tagset(struct nvme_fc_queue *queue)
{
	if (queue->qnum == 0)
		return queue->ctrl->admin_tag_set.tags[queue->qnum];

	return queue->ctrl->tag_set.tags[queue->qnum - 1];
}

static int
nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *req;
	struct nvme_fc_fcp_op *op;

	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
	if (!req)
		return 0;

	op = blk_mq_rq_to_pdu(req);

	if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
		 (ctrl->lport->ops->poll_queue))
		ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
						 queue->lldd_handle);

	return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
}

static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	unsigned long flags;
	bool terminating = false;
	blk_status_t ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO)
		terminating = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (terminating)
		return;

	aen_op = &ctrl->aen_ops[0];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work\n");
}

static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}

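/*
 * Called per-request via blk_mq_tagset_busy_iter() while an association
 * is being torn down: abort the FC exchange of each started request so
 * the LLDD releases the exchange and completes the io.
 */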
static void
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);

	if (!blk_mq_request_started(req))
		return;

	__nvme_fc_abort_op(ctrl, op);
}

static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq = nvme_fc_queue_rq,
	.complete = nvme_fc_complete_rq,
	.init_request = nvme_fc_init_request,
	.exit_request = nvme_fc_exit_request,
	.init_hctx = nvme_fc_init_hctx,
	.poll = nvme_fc_poll,
	.timeout = nvme_fc_timeout,
};

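/*
 * First-time io queue setup: size the tag set so that each request's
 * PDU holds the nvme_fc_fcp_op, an inline scatterlist of SG_CHUNK_SIZE
 * entries, and the LLDD's private per-request area (fcprqst_priv_sz).
 * One tag is reserved for the fabrics Connect command.
 */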
static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (!nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	/* force put free routine to ignore io queues */
	ctrl->ctrl.tagset = NULL;

	return ret;
}

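/*
 * Reconnect-path variant of the above: the tag set already exists, so
 * re-initialize its requests via nvme_reinit_tagset(), recreate and
 * reconnect the hardware queues against the (possibly changed) queue
 * geometry, and tell blk-mq if the hw queue count changed.
 */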
static int
nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	/* check for io queues existing */
	if (ctrl->ctrl.queue_count == 1)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}

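/*
 * Activity accounting: each live association counts against its rport
 * (act_ctrl_cnt), and each rport with at least one live association
 * counts against its lport (act_rport_cnt). When a count drops to zero
 * and the corresponding port object has already been transitioned to
 * FC_OBJSTATE_DELETED, the deferred LLDD delete callback is finally
 * invoked here.
 */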
static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;

	atomic_inc(&lport->act_rport_cnt);
}

static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	cnt = atomic_dec_return(&lport->act_rport_cnt);
	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
		lport->ops->localport_delete(&lport->localport);
}

static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	u32 cnt;

	if (ctrl->assoc_active)
		return 1;

	ctrl->assoc_active = true;
	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
	if (cnt == 1)
		nvme_fc_rport_active_on_lport(rport);

	return 0;
}

static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	/* ctrl->assoc_active=false will be set independently */

	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
	if (cnt == 0) {
		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
			lport->ops->remoteport_delete(&rport->remoteport);
		nvme_fc_rport_inactive_on_lport(rport);
	}

	return 0;
}

/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ENODEV;

	if (nvme_fc_ctlr_active_on_rport(ctrl))
		return -ENOTUNIQ;

	/*
	 * Create the admin queue
	 */

	nvme_fc_init_queue(ctrl, 0);

	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_AQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	if (ctrl->ctrl.state != NVME_CTRL_NEW)
		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */

	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_disconnect_admin_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (ret)
		goto out_disconnect_admin_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);

	ret = nvme_init_identify(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		ret = -EINVAL;
		goto out_disconnect_admin_queue;
	}

	/* FC-NVME supports normal SGL Data Block Descriptors */

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to queue_size\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * Create the io queues
	 */

	if (ctrl->ctrl.queue_count > 1) {
		if (ctrl->ctrl.state == NVME_CTRL_NEW)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_reinit_io_queues(ctrl);
		if (ret)
			goto out_term_aen_ops;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

	ctrl->ctrl.nr_reconnects = 0;

	if (changed)
		nvme_start_ctrl(&ctrl->ctrl);

	return 0;

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	ctrl->assoc_active = false;
	nvme_fc_ctlr_inactive_on_rport(ctrl);

	return ret;
}

/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	unsigned long flags;

	if (!ctrl->assoc_active)
		return;
	ctrl->assoc_active = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->flags |= FCCTRL_TERMIO;
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * If io queues are present, stop them and terminate all
	 * outstanding ios on them. As FC allocates an FC exchange for
	 * each io, the transport must contact the LLDD to terminate the
	 * exchange, thus releasing it. blk_mq_tagset_busy_iter() is used
	 * with the terminate-exchange routine; the LLDD then completes
	 * each io back through the normal done path with an aborted
	 * status.
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */

	/*
	 * clean up the admin queue. Same thing as above: use
	 * blk_mq_tagset_busy_iter() and the transport routine to
	 * terminate the exchanges.
	 */
	if (ctrl->ctrl.state != NVME_CTRL_NEW)
		blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	ctrl->flags &= ~FCCTRL_TERMIO;
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to fc-nvme target to
	 * terminate the association. Only applicable if the connect
	 * sequence got far enough to assign an association id.
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);

	/* re-enable the admin_q so anything new can fast fail */
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	nvme_fc_ctlr_inactive_on_rport(ctrl);
}

static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	cancel_delayed_work_sync(&ctrl->connect_work);
	/*
	 * kill the association on the link side. this will block
	 * waiting for io to terminate
	 */
	nvme_fc_delete_association(ctrl);

	/* resume the io queues so that things will fast fail */
	nvme_start_queues(nctrl);
}

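/*
 * Decide what to do after a failed association create: if the remote
 * port is still online (or its dev_loss window has not yet expired)
 * and the fabrics layer permits another attempt, schedule connect_work
 * after reconnect_delay, shortening the delay so it never overruns
 * dev_loss_end; otherwise give up and delete the controller.
 */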
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_remote_port *portptr = &rport->remoteport;
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE)
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
			ctrl->cnum, status);
	else if (time_after_eq(jiffies, rport->dev_loss_end))
		recon = false;

	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: Reconnect attempt in %ld "
				"seconds\n",
				ctrl->cnum, recon_delay / HZ);
		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
			recon_delay = rport->dev_loss_end - jiffies;

		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
	} else {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Max reconnect attempts (%d) "
				"reached.\n",
				ctrl->cnum, ctrl->ctrl.nr_reconnects);
		else
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
				"while waiting for remoteport connectivity.\n",
				ctrl->cnum, portptr->dev_loss_tmo);
		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
	}
}

static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);

	/* will block while waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to CONNECTING\n", ctrl->cnum);
		return;
	}

	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
		ret = nvme_fc_create_association(ctrl);
	else
		ret = -ENOTCONN;

	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reset complete\n",
			ctrl->cnum);
}

static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name = "fc",
	.module = THIS_MODULE,
	.flags = NVME_F_FABRICS,
	.reg_read32 = nvmf_reg_read32,
	.reg_read64 = nvmf_reg_read64,
	.reg_write32 = nvmf_reg_write32,
	.free_ctrl = nvme_fc_nvme_ctrl_freed,
	.submit_async_event = nvme_fc_submit_async_event,
	.delete_ctrl = nvme_fc_delete_ctrl,
	.get_address = nvmf_get_address,
	.reinit_request = nvme_fc_reinit_request,
};

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reconnect complete\n",
			ctrl->cnum);
}


static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq = nvme_fc_queue_rq,
	.complete = nvme_fc_complete_rq,
	.init_request = nvme_fc_init_request,
	.exit_request = nvme_fc_exit_request,
	.init_hctx = nvme_fc_init_admin_hctx,
	.timeout = nvme_fc_timeout,
};

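/*
 * Duplicate-connect guard: walk the rport's controller list and report
 * whether a controller matching the base connect options already
 * exists, so a second create request for the same host/subsystem pair
 * can be refused unless duplicate_connect was requested.
 */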
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}

static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx, retry;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	if (!opts->duplicate_connect &&
	    nvme_fc_existing_controller(rport, opts)) {
		ret = -EALREADY;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	ctrl->assoc_active = false;
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_admin_tag_set;
	}

	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;

	/* at this point, teardown path changes to ref counting on nvme ctrl */

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	/*
	 * Creating the association may fail transiently, e.g. a
	 * CreateAssociation or CreateIOConnection LS lost to dropped
	 * frames, so retry a few times before declaring failure.
	 */
	for (retry = 0; retry < 3; retry++) {
		ret = nvme_fc_create_association(ctrl);
		if (!ret)
			break;
	}

	if (ret) {
		nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
		cancel_work_sync(&ctrl->ctrl.reset_work);
		cancel_delayed_work_sync(&ctrl->connect_work);

		/* couldn't schedule retry - fail out */
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);

		ctrl->ctrl.opts = NULL;

		/* initiate nvme ctrl ref counting teardown */
		nvme_uninit_ctrl(&ctrl->ctrl);

		/* Remove core ctrl ref. */
		nvme_put_ctrl(&ctrl->ctrl);

		/*
		 * The ctrl teardown kicked off above will drop an rport
		 * reference of its own; take an extra reference here so
		 * the caller's put on the error return does not drop the
		 * rport prematurely.
		 */
		nvme_fc_rport_get(rport);

		if (ret > 0)
			ret = -EIO;
		return ERR_PTR(ret);
	}

	nvme_get_ctrl(&ctrl->ctrl);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exit via here doesn't follow ctlr ref points */
	return ERR_PTR(ret);
}

/* parsed, canonical FC transport address: WWNN + WWPN */
struct nvmet_fc_traddr {
	u64 nn;
	u64 pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * The traddr string must be one of two fixed-width forms:
 *   "nn-0x<16 hex digits>:pn-0x<16 hex digits>"  (with 0x prefixes)
 *   "nn-<16 hex digits>:pn-<16 hex digits>"      (without)
 * Anything else is rejected.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	/* rebuild each wwn as a "0x..." string match_u64() can parse */
	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}

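/*
 * Transport create_ctrl entry point: parse the remote (traddr) and
 * local (host_traddr) FC addresses, e.g. (illustrative values only)
 *   traddr=nn-0x20000090fa942e19:pn-0x10000090fa942e19
 * then walk the registered lports and their rports for a node/port
 * name match. A matched rport is referenced before the lock is
 * dropped; the reference is dropped again if controller init fails.
 */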
static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn)
				continue;

			/* if fail to get reference fall through. Will error */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return ERR_PTR(-ENOENT);
}


static struct nvmf_transport_ops nvme_fc_transport = {
	.name = "fc",
	.module = THIS_MODULE,
	.required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl = nvme_fc_create_ctrl,
};

static int __init nvme_fc_init_module(void)
{
	int ret;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that are currently under scsi and now being
	 * added to by NVME into a new standalone FC class. The SCSI
	 * and NVME protocols and their devices would be bound to the
	 * new FC class.
	 *
	 * Until then, create a minimal class so FC-specific udev
	 * events (e.g. for discovery) have a well-known device to be
	 * posted against.
	 */
	fc_class = class_create(THIS_MODULE, "fc");
	if (IS_ERR(fc_class)) {
		pr_err("couldn't register class fc\n");
		return PTR_ERR(fc_class);
	}

	/*
	 * Create a device for the FC-centric udev events
	 */
	fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
				"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(fc_class, MKDEV(0, 0));
out_destroy_class:
	class_destroy(fc_class);
	return ret;
}

static void __exit nvme_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvme_fc_lport_list))
		pr_warn("%s: localport list not empty\n", __func__);

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(fc_class, MKDEV(0, 0));
	class_destroy(fc_class);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");