// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include <scsi/scsi_transport_fc.h>

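/* *************************** Data Structures *************************** */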
enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;
	bool			req_queued;
};

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;

	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[SG_CHUNK_SIZE];
	uint8_t			priv[0];
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;
	struct list_head		endp_list;
	struct device			*dev;
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		disc_list;
	struct device			*dev;
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
} __aligned(sizeof(u64));

enum nvme_fcctrl_flags {
	FCCTRL_TERMIO		= (1 << 0),
};

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	bool			assoc_active;
	atomic_t		err_work_active;
	u64			association_id;

	struct list_head	ctrl_list;

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct delayed_work	connect_work;
	struct work_struct	err_work;

	struct kref		ref;
	u32			flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}

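/*
 * nvme_fc_lock guards the global lport list and the per-lport endpoint
 * (rport) lists; lports, rports, and controllers are individually
 * refcounted via krefs.
 */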
static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);

static struct device *fc_udev_device;

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
		complete(&nvme_fc_unload_proceed);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}

static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			lport = NULL;
			goto out_done;
		}

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

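/*
 * nvme_fc_register_localport - transport entry point called by an LLDD
 * to register the existence of a host-side NVMe-over-FC port.
 * @pinfo:    port attributes (node/port names, role, port id)
 * @template: LLDD entry points and operational limits
 * @dev:      device for DMA mappings (may be NULL, e.g. for fcloop)
 * @portptr:  on success, set to the new nvme_fc_local_port; NULL on
 *            failure
 *
 * If an lport with the same node/port name was previously deleted, it
 * is revived rather than reallocated. Illustrative call from an LLDD
 * probe path (the lldd_* names are hypothetical):
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = lldd_wwnn,
 *		.port_name = lldd_wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *	};
 *	struct nvme_fc_local_port *lport;
 *	int ret = nvme_fc_register_localport(&pinfo, &lldd_fc_template,
 *					     &pdev->dev, &lport);
 *
 * Returns 0 on success or a negative errno.
 */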
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

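/*
 * nvme_fc_unregister_localport - transport entry point called by an
 * LLDD to deregister a previously registered local FC port. The lport
 * is marked deleted; the LLDD's localport_delete() callback is invoked
 * once no remote ports remain active, and the structure itself is
 * freed on the final kref put.
 */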
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

#define FCNVME_TRADDR_LENGTH		64

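/*
 * Raise an FC_EVENT=nvmediscovery udev CHANGE event on fc_udev_device,
 * carrying the host and target transport addresses, so user space
 * (e.g. an auto-connect daemon) can react to rports advertising the
 * NVME_DISCOVERY role.
 */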
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];
	char tgtaddr[FCNVME_TRADDR_LENGTH];
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		break;

	default:
		break;
	}
}

static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

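/*
 * nvme_fc_register_remoteport - transport entry point called by an LLDD
 * when it discovers an NVMe subsystem target port on the fabric.
 * @localport: lport the rport was discovered on
 * @pinfo:     attributes of the remote port
 * @portptr:   on success, set to the new nvme_fc_remote_port
 *
 * If the same node/port name is still within dev_loss_tmo of a prior
 * deletion, the suspended rport is revived and any controllers on it
 * are kicked to reconnect.
 */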
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect\n", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		break;

	case NVME_CTRL_RESETTING:
		break;

	case NVME_CTRL_DELETING:
	default:
		break;
	}
}

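/*
 * nvme_fc_unregister_remoteport - transport entry point called by an
 * LLDD to deregister/remove a previously registered remote FC port.
 * Starts dev_loss_tmo handling for the controllers attached to the
 * rport: with a zero dev_loss_tmo the controllers are deleted
 * immediately, otherwise they attempt to reconnect until the timeout
 * window expires.
 */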
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

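/*
 * nvme_fc_rescan_remoteport - transport entry point called by an LLDD
 * to request a discovery rescan of an rport; it simply re-raises the
 * nvmediscovery udev event.
 */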
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);

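/*
 * DMA wrappers: an LLDD (e.g. the fcloop test driver) may register
 * with a NULL dma device. These helpers skip the dma_* calls in that
 * case, and fc_map_sg() mimics dma_map_sg() just enough for such
 * drivers.
 */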
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

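/*
 * LS (link service) request handling: lsops are tracked on the rport's
 * ls_req_list so they can be aborted if the rport goes away; the rport
 * reference taken in __nvme_fc_send_ls_req() is dropped when the op is
 * finished or unwound.
 */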
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	return __nvme_fc_send_ls_req(rport, lsop, done);
}

enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);

	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	kfree(lsop);
}

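/*
 * Send a Disconnect Association LS for the whole association. This is
 * fire-and-forget: the lsop (and the request/response buffers allocated
 * with it) is freed in the done callback, and failures are ignored as
 * the association is being torn down anyway.
 */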
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
						FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	ctrl->association_id = 0;
}

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}

static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (ctrl->flags & FCCTRL_TERMIO)
		ctrl->iocnt++;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	if (!(aen_op->flags & FCOP_FLAGS_AEN))
		return;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}

static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);

	if (status)
		goto done;

	switch (freq->rcv_rsplen) {
	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		if (freq->transferred_length !=
		    be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	nvme_end_request(rq, status, result);

check_error:
	if (terminate_assoc)
		nvme_fc_error_recovery(ctrl, "transport detected io error");
}

static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fcp_op_w_sgl *op_w_sgl =
		container_of(op, typeof(*op_w_sgl), op);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}

static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = &op->sgl[0];
	op->op.fcp_req.private = &op->priv[0];
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	return res;
}

static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}

static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}

static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 0);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);
}

static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);

	queue->connection_id = 0;
	atomic_set(&queue->csn, 0);
}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i > 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}

static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			break;

		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i);
}

static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}

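/*
 * All accesses from the nvme core layer are done - the controller can
 * now be freed. Called after the last nvme_put_ctrl() reference drops.
 */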
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	int active;

	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
		active = atomic_xchg(&ctrl->err_work_active, 1);
		if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
			atomic_set(&ctrl->err_work_active, 0);
			WARN_ON(1);
		}
		return;
	}

	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association error detected: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}

static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	nvme_fc_error_recovery(ctrl, "io timeout error");

	return BLK_EH_RESET_TIMER;
}

static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_nr_phys_segments(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
			SG_CHUNK_SIZE);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
				((rq_data_dir(rq) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);

	freq->sg_cnt = 0;
}

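/*
 * Start an FCP operation: format the FC-NVME CMD IU, map the data (for
 * non-AEN ops), and hand the request to the LLDD. Returns BLK_STS_OK
 * once accepted; BLK_STS_RESOURCE when the rport is offline or the
 * LLDD asks for a retry (blk-mq will requeue); BLK_STS_IOERR on hard
 * failures.
 */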
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir	io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	int ret, opstate;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return BLK_STS_RESOURCE;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;

	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				  sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);

		if (!(op->flags & FCOP_FLAGS_AEN))
			nvme_fc_unmap_data(ctrl, op->rq, op);

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
				ret != -EBUSY)
			return BLK_STS_IOERR;

		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir	io_dir;
	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
	u32 data_len;
	blk_status_t ret;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(rq)) {
		data_len = blk_rq_payload_bytes(rq);
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	} else {
		data_len = 0;
		io_dir = NVMEFC_FCP_NODATA;
	}

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}

static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	unsigned long flags;
	bool terminating = false;
	blk_status_t ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO)
		terminating = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (terminating)
		return;

	aen_op = &ctrl->aen_ops[0];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work\n");
}

static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}

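/*
 * Called as a blk_mq_tagset_busy_iter() callback during association
 * teardown to abort every outstanding exchange on the link; the aborted
 * commands complete through the normal LLDD done path.
 */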
2396static bool
2397nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2398{
2399 struct nvme_ctrl *nctrl = data;
2400 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2401 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2402
2403 __nvme_fc_abort_op(ctrl, op);
2404 return true;
2405}
2406
2407
static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.timeout	= nvme_fc_timeout,
};

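/*
 * First-time creation of the io queues for an association: size the
 * queue count against controller and LLDD limits, allocate the io tag
 * set and connect_q, then create the hw queues and connect them.
 */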
static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (!nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	ctrl->ioq_live = true;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	/* force put free routine to ignore io queues */
	ctrl->ctrl.tagset = NULL;

	return ret;
}

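/*
 * Reconnect-path variant of the above: the tag set already exists, so
 * just re-negotiate the queue count, recreate and reconnect the hw
 * queues, and tell blk-mq if the count changed.
 */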
static int
nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	if (!nr_io_queues && prior_ioq_cnt) {
		dev_info(ctrl->ctrl.device,
			"Fail Reconnect: At least 1 io queue "
			"required (was %d)\n", prior_ioq_cnt);
		return -ENOSPC;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	/* check for io queues existing */
	if (ctrl->ctrl.queue_count == 1)
		return 0;

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	if (prior_ioq_cnt != nr_io_queues)
		dev_info(ctrl->ctrl.device,
			"reconnect: revising io queue count from %d to %d\n",
			prior_ioq_cnt, nr_io_queues);
	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}

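/*
 * Track active associations per rport and active rports per lport.
 * The first active controller on an rport marks the rport active on
 * its lport; when the last controller/rport goes inactive and the
 * corresponding port object is already FC_OBJSTATE_DELETED, the
 * deferred LLDD delete callback is invoked.
 */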
static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;

	atomic_inc(&lport->act_rport_cnt);
}

static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	cnt = atomic_dec_return(&lport->act_rport_cnt);
	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
		lport->ops->localport_delete(&lport->localport);
}

static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	u32 cnt;

	if (ctrl->assoc_active)
		return 1;

	ctrl->assoc_active = true;
	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
	if (cnt == 1)
		nvme_fc_rport_active_on_lport(rport);

	return 0;
}

static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	/* ctrl->assoc_active=false will be set independently */

	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
	if (cnt == 0) {
		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
			lport->ops->remoteport_delete(&rport->remoteport);
		nvme_fc_rport_inactive_on_lport(rport);
	}

	return 0;
}

/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ENODEV;

	if (nvme_fc_ctlr_active_on_rport(ctrl))
		return -ENOTUNIQ;

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: create association : host wwpn 0x%016llx "
		" rport wwpn 0x%016llx: NQN \"%s\"\n",
		ctrl->cnum, ctrl->lport->localport.port_name,
		ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);

	/*
	 * Create the admin queue
	 */
	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_AQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */
	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_disconnect_admin_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (ret)
		goto out_disconnect_admin_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);

	ret = nvme_init_identify(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		ret = -EINVAL;
		goto out_disconnect_admin_queue;
	}

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to maxcmd\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * Create the io queues
	 */
	if (ctrl->ctrl.queue_count > 1) {
		if (!ctrl->ioq_live)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_recreate_io_queues(ctrl);
		if (ret)
			goto out_term_aen_ops;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

	ctrl->ctrl.nr_reconnects = 0;

	if (changed)
		nvme_start_ctrl(&ctrl->ctrl);

	return 0;	/* Success */

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	ctrl->assoc_active = false;
	nvme_fc_ctlr_inactive_on_rport(ctrl);

	return ret;
}

/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	unsigned long flags;

	if (!ctrl->assoc_active)
		return;
	ctrl->assoc_active = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->flags |= FCCTRL_TERMIO;
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates an FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange, thus
	 * releasing the FC exchange. blk_mq_tagset_busy_iter() tells us
	 * which io's are busy and invokes the transport routine to kill
	 * them with the LLDD. After terminating the exchange the LLDD will
	 * call the transport's normal io done path, but it will have an
	 * aborted status. The done path will return the io requests back
	 * to the block layer as part of normal completions (but with
	 * error status).
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */

	/*
	 * clean up the admin queue. Same thing as above.
	 * use blk_mq_tagset_busy_iter() and the transport routine to
	 * terminate the exchanges.
	 */
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	ctrl->flags &= ~FCCTRL_TERMIO;
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to fc-nvme target
	 * Note: could have been sent at top of process, but
	 * cleaner on link traffic if after the aborts complete.
	 * Note: if association doesn't exist, association_id will be 0
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);

	/* re-enable the admin_q so anything new can fast fail */
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	/* resume the io queues so that things will fast fail */
	nvme_start_queues(&ctrl->ctrl);

	nvme_fc_ctlr_inactive_on_rport(ctrl);
}

static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->connect_work);
	/*
	 * kill the association on the link side. this will block
	 * waiting for io to terminate
	 */
	nvme_fc_delete_association(ctrl);
}

static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_remote_port *portptr = &rport->remoteport;
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE)
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
			ctrl->cnum, status);
	else if (time_after_eq(jiffies, rport->dev_loss_end))
		recon = false;

	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: Reconnect attempt in %ld "
				"seconds\n",
				ctrl->cnum, recon_delay / HZ);
		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
			recon_delay = rport->dev_loss_end - jiffies;

		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
	} else {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Max reconnect attempts (%d) "
				"reached.\n",
				ctrl->cnum, ctrl->ctrl.nr_reconnects);
		else
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
				"while waiting for remoteport connectivity.\n",
				ctrl->cnum, portptr->dev_loss_tmo);
		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
	}
}

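/*
 * Terminate all io and tear down the association, then move the
 * controller to CONNECTING so a reconnect may be attempted.
 */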
static void
__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	/* will block waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to CONNECTING\n", ctrl->cnum);
}

static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
	int ret;

	__nvme_fc_terminate_io(ctrl);

	nvme_stop_ctrl(&ctrl->ctrl);

	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
		ret = nvme_fc_create_association(ctrl);
	else
		ret = -ENOTCONN;

	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reset complete\n",
			ctrl->cnum);
}

static void
nvme_fc_connect_err_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, err_work);

	__nvme_fc_terminate_io(ctrl);

	atomic_set(&ctrl->err_work_active, 0);

	/*
	 * Rescheduling the connection after recovering
	 * from the io error is left to the reconnect work
	 * item, which is what should have stalled waiting on
	 * the io that had the error that scheduled this work.
	 */
}

static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_delete_ctrl,
	.get_address		= nvmf_get_address,
};

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller connect complete\n",
			ctrl->cnum);
}

static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};

/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}

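/*
 * Allocate and initialize a controller for the given lport/rport pair:
 * set up the admin tag set and admin queue, register with nvme core,
 * and schedule the initial connect. Returns the core nvme_ctrl on
 * success, an ERR_PTR otherwise.
 */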
static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	if (!opts->duplicate_connect &&
	    nvme_fc_existing_controller(rport, opts)) {
		ret = -EALREADY;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	ctrl->ctrl.opts = opts;
	ctrl->ctrl.nr_reconnects = 0;
	if (lport->dev)
		ctrl->ctrl.numa_node = dev_to_node(lport->dev);
	else
		ctrl->ctrl.numa_node = NUMA_NO_NODE;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	ctrl->ioq_live = false;
	ctrl->assoc_active = false;
	atomic_set(&ctrl->err_work_active, 0);
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
	spin_lock_init(&ctrl->lock);

	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->ctrl.cntlid = 0xffff;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	nvme_fc_init_queue(ctrl, 0);

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->admin_tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_admin_tag_set;
	}

	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;

	/* at this point, teardown path changes to ref counting on nvme ctrl */

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
		goto fail_ctrl;
	}

	nvme_get_ctrl(&ctrl->ctrl);

	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
		nvme_put_ctrl(&ctrl->ctrl);
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to schedule initial connect\n",
			ctrl->cnum);
		goto fail_ctrl;
	}

	flush_delayed_work(&ctrl->connect_work);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

fail_ctrl:
	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
	cancel_work_sync(&ctrl->ctrl.reset_work);
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->connect_work);

	ctrl->ctrl.opts = NULL;

	/* initiate nvme ctrl ref counting teardown */
	nvme_uninit_ctrl(&ctrl->ctrl);

	/* Remove core ctrl ref. */
	nvme_put_ctrl(&ctrl->ctrl);

	/*
	 * As we're past the point where we transitioned to the ref-counting
	 * teardown path, the calling routine will see the error return and
	 * do an rport put. The teardown path above has already released the
	 * rport reference this controller held, so take an extra get here
	 * to keep the rport reference count balanced.
	 */
	nvme_fc_rport_get(rport);

	return ERR_PTR(-EIO);

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exit via here doesn't follow ctlr ref points */
	return ERR_PTR(ret);
}

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * Parse an FC transport address of the form
 * "nn-0x<16 hexdigits>:pn-0x<16 hexdigits>" (long form) or
 * "nn-<16 hexdigits>:pn-<16 hexdigits>" (short form), e.g.
 * "nn-0x20000090fac7893f:pn-0x10000090fac7893f", into its
 * node_name/port_name pair.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}

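/*
 * "fc" transport create_ctrl entry point: parse the local and remote
 * transport addresses, locate the matching registered lport/rport
 * pair, and create a controller on it.
 */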
static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn)
				continue;

			/* if fail to get reference fall through. Will error */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	pr_warn("%s: %s - %s combination not found\n",
		__func__, opts->traddr, opts->host_traddr);
	return ERR_PTR(-ENOENT);
}

static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};

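/*
 * Illustrative (not part of this driver) userspace usage, assuming the
 * standard nvme-cli option names for the required/allowed opts above:
 *
 *   nvme connect --transport=fc \
 *	--host-traddr=nn-0x20000090fac7893f:pn-0x10000090fac7893f \
 *	--traddr=nn-0x201700a09890f5bf:pn-0x201900a09890f5bf \
 *	--nqn=<subsystem NQN>
 */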

#define DISCOVERY_MAX_FAIL	20

static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	LIST_HEAD(local_disc_list);
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	int failcnt = 0;

	spin_lock_irqsave(&nvme_fc_lock, flags);
restart:
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (!nvme_fc_lport_get(lport))
				continue;
			if (!nvme_fc_rport_get(rport)) {
				/*
				 * This is a temporary condition. Upon restart
				 * this rport will be gone from the list.
				 *
				 * Revert the lport put and retry. Anything
				 * on the local_disc_list at this time will be
				 * guaranteed to be there on the restart.
				 */
				nvme_fc_lport_put(lport);

				if (failcnt++ < DISCOVERY_MAX_FAIL)
					goto restart;

				pr_err("nvme_discovery: too many reference "
				       "failures\n");
				goto process_local_list;
			}
			if (list_empty(&rport->disc_list))
				list_add_tail(&rport->disc_list,
					      &local_disc_list);
		}
	}

process_local_list:
	while (!list_empty(&local_disc_list)) {
		rport = list_first_entry(&local_disc_list,
					 struct nvme_fc_rport, disc_list);
		list_del_init(&rport->disc_list);
		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		lport = rport->lport;
		/* signal discovery. Won't hurt if it repeats */
		nvme_fc_signal_discovery_scan(lport, rport);
		nvme_fc_rport_put(rport);
		nvme_fc_lport_put(lport);

		spin_lock_irqsave(&nvme_fc_lock, flags);
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return count;
}
static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);

static struct attribute *nvme_fc_attrs[] = {
	&dev_attr_nvme_discovery.attr,
	NULL
};

static struct attribute_group nvme_fc_attr_group = {
	.attrs = nvme_fc_attrs,
};

static const struct attribute_group *nvme_fc_attr_groups[] = {
	&nvme_fc_attr_group,
	NULL
};

static struct class fc_class = {
	.name = "fc",
	.dev_groups = nvme_fc_attr_groups,
	.owner = THIS_MODULE,
};

static int __init nvme_fc_init_module(void)
{
	int ret;

	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
	if (!nvme_fc_wq)
		return -ENOMEM;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that are currently under scsi and now being
	 * added to by NVME into a new standalone FC class. The SCSI
	 * and NVME protocols and their devices would be under this
	 * new FC class.
	 *
	 * As we need something to post FC-specific udev events to,
	 * specifically for nvme probe events, start by creating the
	 * new device class. When the new standalone FC class is
	 * created, this device will move to it.
	 */
	ret = class_register(&fc_class);
	if (ret) {
		pr_err("couldn't register class fc\n");
		goto out_destroy_wq;
	}

	/*
	 * Create a device for the FC-centric udev events
	 */
	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
				"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&fc_class);
out_destroy_wq:
	destroy_workqueue(nvme_fc_wq);

	return ret;
}

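/*
 * Called on transport unload: request deletion of every controller on
 * the rport. nvme_delete_ctrl() only changes state and queues the
 * delete work, so it is safe to call under the rport lock.
 */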
static void
nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;

	spin_lock(&rport->lock);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		dev_warn(ctrl->ctrl.device,
			"NVME-FC{%d}: transport unloading: deleting ctrl\n",
			ctrl->cnum);
		nvme_delete_ctrl(&ctrl->ctrl);
	}
	spin_unlock(&rport->lock);
}

static void
nvme_fc_cleanup_for_unload(void)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			nvme_fc_delete_controllers(rport);
		}
	}
}

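/*
 * Module unload: if any lports are still registered, ask their
 * controllers to delete, then wait for the last lport unregistration
 * to complete nvme_fc_unload_proceed before tearing down the
 * transport, class and workqueue.
 */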
static void __exit nvme_fc_exit_module(void)
{
	unsigned long flags;
	bool need_cleanup = false;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	nvme_fc_waiting_to_unload = true;
	if (!list_empty(&nvme_fc_lport_list)) {
		need_cleanup = true;
		nvme_fc_cleanup_for_unload();
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);
	if (need_cleanup) {
		pr_info("%s: waiting for ctlr deletes\n", __func__);
		wait_for_completion(&nvme_fc_unload_proceed);
		pr_info("%s: ctrl deletes complete\n", __func__);
	}

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(&fc_class, MKDEV(0, 0));
	class_unregister(&fc_class);
	destroy_workqueue(nvme_fc_wq);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");