// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>
#include <linux/blk-cgroup.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>

/* *************************** Data Structures/Defines ****************** */

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* delay between reconnects
						 * when connected and a
						 * connection failure
						 */

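/*
 * Per-queue context, one per nvme queue (index 0 is the admin queue):
 * tracks the LLDD hardware queue handle, the FC-NVME connection id
 * assigned by the target at Create Connection time, and the command
 * sequence number (csn) counter for the connection.
 */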
struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

struct nvmefc_ls_rcv_op {
	struct nvme_fc_rport		*rport;
	struct nvmefc_ls_rsp		*lsrsp;
	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	bool				handled;
	dma_addr_t			rspdma;
	struct list_head		lsrcv_list;	/* rport->ls_rcv_list */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[];
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_lport_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list; /* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		ls_rcv_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
	struct work_struct		lsrcv_work;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE		0
#define ASSOC_FAILED		1
#define FCCTRL_TERMIO		2

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	u64			association_id;
	struct nvmefc_ls_rcv_op	*rcv_disconn;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	ioerr_work;
	struct delayed_work	connect_work;

	struct kref		ref;
	unsigned long		flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}

/* *************************** Globals **************************** */

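/*
 * nvme_fc_lock protects the global lport list (and, transitively, each
 * lport's rport list). Finer-grained state on an rport or ctrl is
 * protected by that object's own spinlock.
 */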
static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);

/*
 * These items are short-term. It will eventually be moved into
 * a generic FC class.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);

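/*
 * kref release handler for an lport: runs once the last reference is
 * dropped, after the LLDD has unregistered the port. Unlinks the lport
 * from the global list, releases its port number, and frees it. Also
 * lets a pending module unload proceed once the list drains empty.
 */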
static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
		complete(&nvme_fc_unload_proceed);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}


static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look for a match on the local port name - rather than
	 * re-registering a new structure, resume the prior deleted
	 * registration if one exists.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found an lport that had been previously deleted - resume it */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	if (template->local_priv_sz)
		newrec->localport.private = &newrec[1];
	else
		newrec->localport.private = NULL;
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered a NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME are fixed format:
 *   "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ from the constant by prefixes of:
 *   FC_EVENT=nvmediscovery  NVMEFC_HOST_TRADDR=  NVMEFC_TRADDR=
 *   19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

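/*
 * Called, with the rport lock held, when connectivity to a remote port
 * that had been suspended is re-established: nudge each controller on
 * the rport back toward a live association.
 */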
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}

static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lport still held primary reference on rport */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look for a matching remote port within the local port's
	 * remote port list
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found an rport that had been previously deleted - resume it */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	newrec->dev = lport->dev;
	newrec->lport = lport;
	if (lport->ops->remote_priv_sz)
		newrec->remoteport.private = &newrec[1];
	else
		newrec->remoteport.private = NULL;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
	INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect\n", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do
		 * anything further. Reconnects will be attempted until
		 * either the ctlr_loss_tmo (max_retries * connect_delay)
		 * expires or the remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered a NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrapper all the dma routines and check the dev pointer.
 *
 * If simple mappings (return just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}


/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

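/*
 * Common LS transmit path: a single bidirectional DMA mapping covers
 * the request buffer immediately followed by the response buffer, the
 * op is linked on the rport's ls_req_list, and the LS is handed to the
 * LLDD. On an LLDD submission error the op is unlinked and unmapped
 * before the error is returned.
 */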
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus the
		 * fixed context of the lldd's done routine on the ls
		 * request is what is waited on.
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

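/*
 * Send a Create Association LS for the admin queue and validate the
 * accept payload descriptor by descriptor. On success, record the
 * association id and admin-queue connection id returned by the target
 * (under the ctrl lock, as the association id is read elsewhere).
 */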
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	unsigned long flags;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Association failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = &assoc_acc[1];
	else
		lsreq->private = NULL;

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/*
	 * validate
	 */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create Association LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		spin_lock_irqsave(&ctrl->lock, flags);
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*conn_rqst) + sizeof(*conn_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&conn_acc[1];
	else
		lsreq->private = NULL;

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/*
	 * validate
	 */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create I/O Connection LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect I/O queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that it's
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Disconnect Association "
			"failed: ENOMEM\n",
			ctrl->cnum);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				ctrl->association_id);

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);
}

static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);
	list_del(&lsop->lsrcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
				sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	kfree(lsop);

	nvme_fc_rport_put(rport);
}

static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	int ret;

	fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
				  sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
				     lsop->lsrsp);
	if (ret) {
		dev_warn(lport->dev,
			"LLDD rejected LS RSP xmt: LS %d status %d\n",
			w0->ls_cmd, ret);
		nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
		return;
	}
}

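/*
 * Locate the controller whose association id matches a received
 * Disconnect Association LS. On a match, a controller reference is
 * held for the caller and the lsop is stashed as the pending
 * disconnect; any disconnect LS already pending for that association
 * is rejected here.
 */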
static struct nvme_fc_ctrl *
nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
		      struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct nvme_fc_ctrl *ctrl, *ret = NULL;
	struct nvmefc_ls_rcv_op *oldls = NULL;
	u64 association_id = be64_to_cpu(rqst->associd.association_id);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		if (!nvme_fc_ctrl_get(ctrl))
			continue;
		spin_lock(&ctrl->lock);
		if (association_id == ctrl->association_id) {
			oldls = ctrl->rcv_disconn;
			ctrl->rcv_disconn = lsop;
			ret = ctrl;
		}
		spin_unlock(&ctrl->lock);
		if (ret)
			/* leave the ctrl get reference */
			break;
		nvme_fc_ctrl_put(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	/* transmit a response for anything that was pending */
	if (oldls) {
		dev_info(rport->lport->dev,
			"NVME-FC{%d}: Multiple Disconnect Association "
			"LS's received\n", ctrl->cnum);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*oldls->rspbuf),
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvme_fc_xmt_ls_rsp(oldls);
	}

	return ret;
}

/*
 * returns true to mean LS handled and ls_rsp can be sent
 * returns false to defer ls_rsp xmt (will be done as part of
 * association termination)
 */
static bool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
					&lsop->rspbuf->rsp_dis_assoc;
	struct nvme_fc_ctrl *ctrl = NULL;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association */
		ctrl = nvme_fc_match_disconn_ls(rport, lsop);
		if (!ctrl)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_info(rport->lport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
					sizeof(*acc), rqst->w0.ls_cmd,
					(ret == VERR_NO_ASSOC) ?
						FCNVME_RJT_RC_INV_ASSOC :
						FCNVME_RJT_RC_LOGIC,
					FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format an ACCept response */

	lsop->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * the transmit of the response will occur after the exchanges
	 * for the association have been ABTS'd by
	 * nvme_fc_delete_association().
	 */

	/* fail the association */
	nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");

	/* release the reference taken by nvme_fc_match_disconn_ls() */
	nvme_fc_ctrl_put(ctrl);

	return false;
}

/*
 * Actual Processing routine for received FC-NVME LS requests from the LLD
 * returns true if a response should be sent afterward, false if rsp will
 * be sent asynchronously
 */
static bool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	bool ret = true;

	lsop->lsrsp->nvme_fc_private = lsop;
	lsop->lsrsp->rspbuf = lsop->rspbuf;
	lsop->lsrsp->rspdma = lsop->rspdma;
	lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	lsop->lsrsp->rsplen = 0;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_DISCONNECT_ASSOC:
		ret = nvme_fc_ls_disconnect_assoc(lsop);
		break;
	case FCNVME_LS_DISCONNECT_CONN:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
		break;
	case FCNVME_LS_CREATE_ASSOCIATION:
	case FCNVME_LS_CREATE_CONNECTION:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
		break;
	default:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
		break;
	}

	return(ret);
}

static void
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvme_fc_rport *rport =
		container_of(work, struct nvme_fc_rport, lsrcv_work);
	struct fcnvme_ls_rqst_w0 *w0;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	bool sendrsp;

restart:
	sendrsp = true;
	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
		if (lsop->handled)
			continue;

		lsop->handled = true;
		if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
			spin_unlock_irqrestore(&rport->lock, flags);
			sendrsp = nvme_fc_handle_ls_rqst(lsop);
		} else {
			spin_unlock_irqrestore(&rport->lock, flags);
			w0 = &lsop->rqstbuf->w0;
			lsop->lsrsp->rsplen = nvme_fc_format_rjt(
						lsop->rspbuf,
						sizeof(*lsop->rspbuf),
						w0->ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		}
		if (sendrsp)
			nvme_fc_xmt_ls_rsp(lsop);
		goto restart;
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}

/**
 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a FC-NVME LS request.
 *
 * The nvme-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @portptr:    pointer to the (registered) remote port that the LS
 *              was received on.
 * @lsrsp:      pointer to a nvmefc_ls_rsp response structure to be
 *              used to reference the exchange corresponding to the LS
 *              when issuing an ls response.
 * @lsreqbuf:   pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	int ret;

	nvme_fc_rport_get(rport);

	/* validate there's a routine to transmit a response */
	if (!lport->ops->xmt_ls_rsp) {
		dev_info(lport->dev,
			"RCV %s LS failed: no LLDD xmt_ls_rsp\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EINVAL;
		goto out_put;
	}

	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
		dev_info(lport->dev,
			"RCV %s LS failed: payload too large\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -E2BIG;
		goto out_put;
	}

	lsop = kzalloc(sizeof(*lsop) +
			sizeof(union nvmefc_ls_requests) +
			sizeof(union nvmefc_ls_responses),
			GFP_KERNEL);
	if (!lsop) {
		dev_info(lport->dev,
			"RCV %s LS failed: No memory\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -ENOMEM;
		goto out_put;
	}
	lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
	lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];

	lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
					sizeof(*lsop->rspbuf),
					DMA_TO_DEVICE);
	if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
		dev_info(lport->dev,
			"RCV %s LS failed: DMA mapping failure\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EFAULT;
		goto out_free;
	}

	lsop->rport = rport;
	lsop->lsrsp = lsrsp;

	memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
	lsop->rqstdatalen = lsreqbuf_len;

	spin_lock_irqsave(&rport->lock, flags);
	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		ret = -ENOTCONN;
		goto out_unmap;
	}
	list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	schedule_work(&rport->lsrcv_work);

	return 0;

out_unmap:
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
out_free:
	kfree(lsop);
out_put:
	nvme_fc_rport_put(rport);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);


/* *********************** NVME Ctrl Routines **************************** */

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}

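/*
 * Atomically move an op to ABORTED. Only an op that was ACTIVE is
 * aborted through the LLDD; if a controller-wide terminate is in
 * progress (FCCTRL_TERMIO), the op is added to iocnt so teardown can
 * wait on ioabort_wait for all aborts to complete.
 */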
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
		op->flags |= FCOP_FLAGS_TERMIO;
		ctrl->iocnt++;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	/* ensure we've initialized the ops once */
	if (!(aen_op->flags & FCOP_FLAGS_AEN))
		return;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}

static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
		    op->flags & FCOP_FLAGS_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}

static void
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, ioerr_work);

	nvme_fc_error_recovery(ctrl, "transport detected io error");
}

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
	 *
	 * Failure of an individual i/o detected by the transport is
	 * escalated: unless the controller is already resetting, the
	 * ioerr work is queued to terminate and recover the association
	 * (see terminate_assoc below).
	 */
	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
		sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
	else if (freq->status) {
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to lldd error %d\n",
			ctrl->cnum, freq->status);
	}

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire; the
	 * rest of the io completion handling depends on the response
	 * payload (if any) that was received.
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
		    be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad transfer "
				"length: %d vs expected %d\n",
				ctrl->cnum, freq->transferred_length,
				be32_to_cpu(op->cmd_iu.data_len));
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.ersp_result ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
				"iu len %d, xfr len %d vs %d, status code "
				"%d, cmdid %d vs %d\n",
				ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
				be32_to_cpu(op->rsp_iu.xfrd_len),
				freq->transferred_length,
				op->rsp_iu.ersp_result,
				sqe->common.command_id,
				cqe->command_id);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
			"len %d\n",
			ctrl->cnum, freq->rcv_rsplen);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	if (!nvme_try_complete_req(rq, status, result))
		nvme_fc_complete_rq(rq);

check_error:
	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}

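/*
 * One-time setup of an FCP op: point the LLDD request at the embedded
 * command/response IUs, format the FC-NVME CMD IU header (admin vs io
 * command category), and DMA-map both IUs - the command IU toward the
 * device and the response IU from it.
 */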
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fcp_op_w_sgl *op_w_sgl =
		container_of(op, typeof(*op_w_sgl), op);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->format_id = NVME_CMD_FORMAT_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	if (queue->qnum)
		cmdiu->rsv_cat = fccmnd_set_cat_css(0,
					(NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
	else
		cmdiu->rsv_cat = fccmnd_set_cat_admin(0);

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}

static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = op->sgl;
	op->op.fcp_req.private = &op->priv[0];
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
	return res;
}

static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private = NULL;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (ctrl->lport->ops->fcprqst_priv_sz) {
			private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
			if (!private)
				return -ENOMEM;
		}

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}

static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	cancel_work_sync(&ctrl->ctrl.async_event_work);
	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}

static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 0);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter api's would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}

/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate the
 * queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);

	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	atomic_set(&queue->csn, 0);
}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i > 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}

2339static int
2340nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2341{
2342 int i, ret = 0;
2343
2344 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
2345 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
2346 (qsize / 5));
2347 if (ret)
2348 break;
2349 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
2350 if (ret)
2351 break;
2352
2353 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2354 }
2355
2356 return ret;
2357}
2358
2359static void
2360nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2361{
2362 int i;
2363
2364 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2365 nvme_fc_init_queue(ctrl, i);
2366}
2367
2368static void
2369nvme_fc_ctrl_free(struct kref *ref)
2370{
2371 struct nvme_fc_ctrl *ctrl =
2372 container_of(ref, struct nvme_fc_ctrl, ref);
2373 unsigned long flags;
2374
2375 if (ctrl->ctrl.tagset) {
2376 blk_cleanup_queue(ctrl->ctrl.connect_q);
2377 blk_mq_free_tag_set(&ctrl->tag_set);
2378 }
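
	/* remove from rport list */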
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}
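
/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call.
 */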
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}
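
/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke
 * this routine to kill them on a 1 by 1 basis.
 *
 * As FC allocates FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */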
static bool
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);

	op->nreq.flags |= NVME_REQ_CANCELLED;
	__nvme_fc_abort_op(ctrl, op);
	return true;
}
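
/*
 * This routine runs through all outstanding commands on the association
 * and aborts them.  This routine is typically called by the
 * delete_association routine. It is also called due to an error during
 * reconnect. In that scenario, it is most likely a command that initializes
 * the controller, including fabric Connect commands on io queues, that
 * may have timed out or failed thus the io must be killed for the connect
 * thread to see the error.
 */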
static void
__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
{
	int q;
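
	/*
	 * if aborting io, the queues are no longer good, mark them
	 * all as not live.
	 */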
	if (ctrl->ctrl.queue_count > 1) {
		for (q = 1; q < ctrl->ctrl.queue_count; q++)
			clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
	}
	clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
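
	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange,
	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr()
	 * to tell us what io's are busy and invoke a transport routine
	 * to kill them with the LLDD.  After terminating the exchange
	 * the LLDD will call the transport's normal io done path, but it
	 * will have an aborted status. The done path will return the
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */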
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		nvme_sync_io_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		if (start_queues)
			nvme_start_queues(&ctrl->ctrl);
	}
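
	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */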
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_sync_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
			nvme_fc_terminate_exchange, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
}

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
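	/*
	 * if an error (io timeout, etc.) occurs while the association is
	 * still being created (state CONNECTING), abort any outstanding
	 * io, mark the association as failed, and let the create_association
	 * error path resolve things.
	 */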
	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
		__nvme_fc_abort_outstanding_ios(ctrl, true);
		set_bit(ASSOC_FAILED, &ctrl->flags);
		return;
	}
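
	/* Otherwise, only proceed if in LIVE state - e.g. on first error */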
	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association event: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}

static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
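
	/*
	 * Attempt to abort the offending command. Command completion
	 * will detect the aborted io and will fail the connection.
	 */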
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
		"x%08x/x%08x\n",
		ctrl->cnum, op->queue->qnum, sqe->common.opcode,
		sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
	if (__nvme_fc_abort_op(ctrl, op))
		nvme_fc_error_recovery(ctrl, "io timeout abort failed");
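
	/*
	 * the io abort has been initiated. Have the reset timer
	 * restarted and the abort completion will complete the io
	 * shortly. Avoids a synchronous wait while the abort finishes.
	 */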
	return BLK_EH_RESET_TIMER;
}

static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_nr_phys_segments(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
			NVME_INLINE_SG_CNT);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, rq_dma_dir(rq));
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
			rq_dma_dir(rq));

	sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);

	freq->sg_cnt = 0;
}
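
/*
 * Start an FCP io as an FC exchange with the target.
 *
 * In FC-NVME, each command (the SQE) is sent in the initial CMD IU of
 * its own FC exchange: all data transfers for the io occur on that
 * exchange and the CQE is carried back (explicitly or implicitly) in
 * the RSP IU that closes it. The LLDD allocates the exchange resource
 * when the transport invokes its fcp_io() entrypoint below.
 */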
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	int ret, opstate;
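
	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */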
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return BLK_STS_RESOURCE;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;
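
	/* format the FC-NVME CMD IU and fcp_req */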
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);
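
	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */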
	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;
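
	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-specific value
	 *    address=0
	 *    length=length of the data series
	 */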
	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
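		/*
		 * The LLDD failed to start the io. Undo the op state,
		 * release the data mapping and controller reference, and
		 * return a status that lets the block layer either retry
		 * (BLK_STS_RESOURCE) or fail (BLK_STS_IOERR) the request.
		 */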
		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);

		if (!(op->flags & FCOP_FLAGS_AEN)) {
			nvme_fc_unmap_data(ctrl, op->rq, op);
			nvme_cleanup_cmd(op->rq);
		}

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
				ret != -EBUSY)
			return BLK_STS_IOERR;

		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	enum nvmefc_fcp_datadir io_dir;
	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
	u32 data_len;
	blk_status_t ret;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
	    !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;
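
	/*
	 * nvme core doesn't quite treat the rq opaquely. Commands such
	 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
	 * there is no actual payload to be transferred.
	 * To get it right, key data transmission on there being 1 or
	 * more physical segments in the sg list. If there is no
	 * physical segments, there is no payload.
	 */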
	if (blk_rq_nr_phys_segments(rq)) {
		data_len = blk_rq_payload_bytes(rq);
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	} else {
		data_len = 0;
		io_dir = NVMEFC_FCP_NODATA;
	}

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}

static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	blk_status_t ret;

	if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
		return;

	aen_op = &ctrl->aen_ops[0];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work\n");
}

static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);
	op->flags &= ~FCOP_FLAGS_TERMIO;

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}

static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.timeout	= nvme_fc_timeout,
};

static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (!nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	ctrl->ioq_live = true;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	ctrl->ctrl.tagset = NULL;

	return ret;
}

static int
nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	if (!nr_io_queues && prior_ioq_cnt) {
		dev_info(ctrl->ctrl.device,
			"Fail Reconnect: At least 1 io queue "
			"required (was %d)\n", prior_ioq_cnt);
		return -ENOSPC;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;

	if (ctrl->ctrl.queue_count == 1)
		return 0;

	if (prior_ioq_cnt != nr_io_queues) {
		dev_info(ctrl->ctrl.device,
			"reconnect: revising io queue count from %d to %d\n",
			prior_ioq_cnt, nr_io_queues);
		blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}

static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;

	atomic_inc(&lport->act_rport_cnt);
}

static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	cnt = atomic_dec_return(&lport->act_rport_cnt);
	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
		lport->ops->localport_delete(&lport->localport);
}

static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	u32 cnt;

	if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
		return 1;

	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
	if (cnt == 1)
		nvme_fc_rport_active_on_lport(rport);

	return 0;
}

static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;
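
	/* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
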
	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
	if (cnt == 0) {
		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
			lport->ops->remoteport_delete(&rport->remoteport);
		nvme_fc_rport_inactive_on_lport(rport);
	}

	return 0;
}
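
/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */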
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	struct nvmefc_ls_rcv_op *disls = NULL;
	unsigned long flags;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ENODEV;

	if (nvme_fc_ctlr_active_on_rport(ctrl))
		return -ENOTUNIQ;

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: create association : host wwpn 0x%016llx "
		" rport wwpn 0x%016llx: NQN \"%s\"\n",
		ctrl->cnum, ctrl->lport->localport.port_name,
		ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);

	clear_bit(ASSOC_FAILED, &ctrl->flags);
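
	/*
	 * Create the admin queue
	 */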
	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_AQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
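
	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */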
	ret = nvme_enable_ctrl(&ctrl->ctrl);
	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
		goto out_disconnect_admin_queue;

	ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
	ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
						(ilog2(SZ_4K) - 9);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	ret = nvme_init_ctrl_finish(&ctrl->ctrl);
	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
		goto out_disconnect_admin_queue;
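
	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */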
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out_disconnect_admin_queue;
	}
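
	/* FC-NVME supports normal SGL Data Block Descriptors */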
	if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
		dev_err(ctrl->ctrl.device,
			"Mandatory sgls are not supported!\n");
		ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out_disconnect_admin_queue;
	}

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
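		/* warn if maxcmd is lower than queue_size */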
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to maxcmd\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
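		/* warn if sqsize is lower than queue_size */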
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, reducing "
			"to sqsize\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;
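
	/*
	 * Create the io queues
	 */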
	if (ctrl->ctrl.queue_count > 1) {
		if (!ctrl->ioq_live)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_recreate_io_queues(ctrl);
	}
	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
		goto out_term_aen_ops;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

	ctrl->ctrl.nr_reconnects = 0;

	if (changed)
		nvme_start_ctrl(&ctrl->ctrl);

	return 0;

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
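	/* send a Disconnect(association) LS to fc-nvme target */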
	nvme_fc_xmt_disconnect_assoc(ctrl);
	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->association_id = 0;
	disls = ctrl->rcv_disconn;
	ctrl->rcv_disconn = NULL;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (disls)
		nvme_fc_xmt_ls_rsp(disls);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	clear_bit(ASSOC_ACTIVE, &ctrl->flags);
	nvme_fc_ctlr_inactive_on_rport(ctrl);

	return ret;
}
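
/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */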
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmefc_ls_rcv_op *disls = NULL;
	unsigned long flags;

	if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
		return;

	spin_lock_irqsave(&ctrl->lock, flags);
	set_bit(FCCTRL_TERMIO, &ctrl->flags);
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	__nvme_fc_abort_outstanding_ios(ctrl, false);
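
	/* kill the aens as they are a separate path */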
	nvme_fc_abort_aen_ops(ctrl);
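
	/* wait for all io that had to be aborted */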
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	clear_bit(FCCTRL_TERMIO, &ctrl->flags);
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);
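
	/*
	 * send Disconnect(association) LS to fc-nvme target
	 * Note: could have been sent at top of process, but
	 * cleaner on link traffic if after the aborts complete.
	 * Note: if association doesn't exist, association_id will be 0
	 */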
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->association_id = 0;
	disls = ctrl->rcv_disconn;
	ctrl->rcv_disconn = NULL;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (disls)
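		/*
		 * if a Disconnect Request was waiting for a response, send
		 * now that all ABTS's have been issued (and are complete).
		 */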
		nvme_fc_xmt_ls_rsp(disls);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);
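
	/* re-enable the admin_q so anything new can fast fail */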
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
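
	/* resume the io queues so that things will fast fail */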
	nvme_start_queues(&ctrl->ctrl);

	nvme_fc_ctlr_inactive_on_rport(ctrl);
}

static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	cancel_work_sync(&ctrl->ioerr_work);
	cancel_delayed_work_sync(&ctrl->connect_work);
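
	/*
	 * kill the association on the link side.  this will block
	 * waiting for io to terminate
	 */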
	nvme_fc_delete_association(ctrl);
}

static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_remote_port *portptr = &rport->remoteport;
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
			ctrl->cnum, status);
		if (status > 0 && (status & NVME_SC_DNR))
			recon = false;
	} else if (time_after_eq(jiffies, rport->dev_loss_end))
		recon = false;

	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: Reconnect attempt in %ld "
				"seconds\n",
				ctrl->cnum, recon_delay / HZ);
		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
			recon_delay = rport->dev_loss_end - jiffies;

		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
	} else {
		if (portptr->port_state == FC_OBJSTATE_ONLINE) {
			if (status > 0 && (status & NVME_SC_DNR))
				dev_warn(ctrl->ctrl.device,
					"NVME-FC{%d}: reconnect failure\n",
					ctrl->cnum);
			else
				dev_warn(ctrl->ctrl.device,
					"NVME-FC{%d}: Max reconnect attempts "
					"(%d) reached.\n",
					ctrl->cnum, ctrl->ctrl.nr_reconnects);
		} else
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
				"while waiting for remoteport connectivity.\n",
				ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
					(ctrl->ctrl.opts->max_reconnects *
					 ctrl->ctrl.opts->reconnect_delay)));
		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
	}
}

static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);

	nvme_stop_ctrl(&ctrl->ctrl);
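
	/* will block waiting for io to terminate */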
	nvme_fc_delete_association(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to CONNECTING\n", ctrl->cnum);

	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
		if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
			dev_err(ctrl->ctrl.device,
				"NVME-FC{%d}: failed to schedule connect "
				"after reset\n", ctrl->cnum);
		} else {
			flush_delayed_work(&ctrl->connect_work);
		}
	} else {
		nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
	}
}

static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_delete_ctrl,
	.get_address		= nvmf_get_address,
};

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller connect complete\n",
			ctrl->cnum);
}

static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};
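
/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */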
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}

static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx, ctrl_loss_tmo;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	if (!opts->duplicate_connect &&
	    nvme_fc_existing_controller(rport, opts)) {
		ret = -EALREADY;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}
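
	/*
	 * if ctrl_loss_tmo is being enforced and the default reconnect delay
	 * is being used, change to a shorter reconnect delay for FC.
	 */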
	if (opts->max_reconnects != -1 &&
	    opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
	    opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
		ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
		opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	}

	ctrl->ctrl.opts = opts;
	ctrl->ctrl.nr_reconnects = 0;
	if (lport->dev)
		ctrl->ctrl.numa_node = dev_to_node(lport->dev);
	else
		ctrl->ctrl.numa_node = NUMA_NO_NODE;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	ctrl->ioq_live = false;
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->ctrl.cntlid = 0xffff;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	nvme_fc_init_queue(ctrl, 0);

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->admin_tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
		ret = PTR_ERR(ctrl->ctrl.fabrics_q);
		goto out_free_admin_tag_set;
	}

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_cleanup_fabrics_q;
	}
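
	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */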
	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;
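
	/* at this point, teardown path changes to ref counting on nvme ctrl */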
	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
		goto fail_ctrl;
	}

	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to schedule initial connect\n",
			ctrl->cnum);
		goto fail_ctrl;
	}

	flush_delayed_work(&ctrl->connect_work);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

fail_ctrl:
	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
	cancel_work_sync(&ctrl->ioerr_work);
	cancel_work_sync(&ctrl->ctrl.reset_work);
	cancel_delayed_work_sync(&ctrl->connect_work);

	ctrl->ctrl.opts = NULL;
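
	/* initiate nvme ctrl ref counting teardown */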
	nvme_uninit_ctrl(&ctrl->ctrl);
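
	/* Remove core ctrl ref. */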
	nvme_put_ctrl(&ctrl->ctrl);
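
	/*
	 * as we're past the point where we transition to the ref
	 * counting teardown path, if we return a bad pointer here,
	 * the calling routine, thinking it's prior to the
	 * transition, will do an rport put. Since the teardown
	 * path also does a rport put, we do an extra get here
	 * so that proper order/teardown happens.
	 */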
	nvme_fc_rport_get(rport);

	return ERR_PTR(-EIO);

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
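	/* exit via here doesn't follow ctlr ref points */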
	return ERR_PTR(ret);
}


struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}
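
/*
 * Validate and decode an FC transport address (traddr) string into node
 * and port names. Accepted formats are "nn-0x<16hexdigits>:pn-0x<16hexdigits>"
 * and "nn-<16hexdigits>:pn-<16hexdigits>".
 */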
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;
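
	/* validate if string is one of the 2 allowed formats */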
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}

static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);
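
	/* find the host and remote ports to connect together */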
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn ||
		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn ||
			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
				continue;
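
			/* if fail to get reference fall through. Will error */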
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	pr_warn("%s: %s - %s combination not found\n",
		__func__, opts->traddr, opts->host_traddr);
	return ERR_PTR(-ENOENT);
}


static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};

#define DISCOVERY_MAX_FAIL	20

static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	LIST_HEAD(local_disc_list);
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	int failcnt = 0;

	spin_lock_irqsave(&nvme_fc_lock, flags);
restart:
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (!nvme_fc_lport_get(lport))
				continue;
			if (!nvme_fc_rport_get(rport)) {
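				/*
				 * This is a temporary condition. Upon restart
				 * this rport will be gone from the list.
				 *
				 * Revert the lport put and retry. Anything
				 * added to the list already will be skipped (as
				 * they are no longer list_empty). Loops should
				 * resume at rports that were not yet seen.
				 */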
				nvme_fc_lport_put(lport);

				if (failcnt++ < DISCOVERY_MAX_FAIL)
					goto restart;

				pr_err("nvme_discovery: too many reference "
				       "failures\n");
				goto process_local_list;
			}
			if (list_empty(&rport->disc_list))
				list_add_tail(&rport->disc_list,
					      &local_disc_list);
		}
	}

process_local_list:
	while (!list_empty(&local_disc_list)) {
		rport = list_first_entry(&local_disc_list,
					 struct nvme_fc_rport, disc_list);
		list_del_init(&rport->disc_list);
		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		lport = rport->lport;
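		/* signal discovery. Won't hurt if it repeats */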
		nvme_fc_signal_discovery_scan(lport, rport);
		nvme_fc_rport_put(rport);
		nvme_fc_lport_put(lport);

		spin_lock_irqsave(&nvme_fc_lock, flags);
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return count;
}
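
/* Parse the cgroup id from a buf and return the length of cgrpid */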
static int fc_parse_cgrpid(const char *buf, u64 *id)
{
	char cgrp_id[16+1];
	int cgrpid_len, j;

	memset(cgrp_id, 0x0, sizeof(cgrp_id));
	for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
		if (buf[cgrpid_len] != ':')
			cgrp_id[cgrpid_len] = buf[cgrpid_len];
		else {
			j = 1;
			break;
		}
	}
	if (!j)
		return -EINVAL;
	if (kstrtou64(cgrp_id, 16, id) < 0)
		return -EINVAL;
	return cgrpid_len;
}
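
/*
 * This routine parses and updates the appid in the blkcg associated with
 * the cgroupid.
 */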
static int fc_update_appid(const char *buf, size_t count)
{
	u64 cgrp_id;
	int appid_len = 0;
	int cgrpid_len = 0;
	char app_id[FC_APPID_LEN];
	int ret = 0;

	if (buf[count-1] == '\n')
		count--;

	if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':')))
		return -EINVAL;

	cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
	if (cgrpid_len < 0)
		return -EINVAL;
	appid_len = count - cgrpid_len - 1;
	if (appid_len > FC_APPID_LEN)
		return -EINVAL;

	memset(app_id, 0x0, sizeof(app_id));
	memcpy(app_id, &buf[cgrpid_len+1], appid_len);
	ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
	if (ret < 0)
		return ret;
	return count;
}

static ssize_t fc_appid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;

	ret = fc_update_appid(buf, count);
	if (ret < 0)
		return -EINVAL;
	return count;
}

static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);

static struct attribute *nvme_fc_attrs[] = {
	&dev_attr_nvme_discovery.attr,
	&dev_attr_appid_store.attr,
	NULL
};

static const struct attribute_group nvme_fc_attr_group = {
	.attrs = nvme_fc_attrs,
};

static const struct attribute_group *nvme_fc_attr_groups[] = {
	&nvme_fc_attr_group,
	NULL
};

static struct class fc_class = {
	.name = "fc",
	.dev_groups = nvme_fc_attr_groups,
	.owner = THIS_MODULE,
};

static int __init nvme_fc_init_module(void)
{
	int ret;

	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
	if (!nvme_fc_wq)
		return -ENOMEM;
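
	/*
	 * NOTE:
	 * The fc_class and the device created below exist to generate
	 * FC-specific udev events (e.g. the nvme_discovery trigger).
	 * Should the kernel ever grow a unified FC transport, this
	 * class/device creation would be expected to move there.
	 */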
	ret = class_register(&fc_class);
	if (ret) {
		pr_err("couldn't register class fc\n");
		goto out_destroy_wq;
	}
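
	/*
	 * Create a device for the FC-centric udev events
	 */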
	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
				"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&fc_class);
out_destroy_wq:
	destroy_workqueue(nvme_fc_wq);

	return ret;
}

static void
nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;

	spin_lock(&rport->lock);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		dev_warn(ctrl->ctrl.device,
			"NVME-FC{%d}: transport unloading: deleting ctrl\n",
			ctrl->cnum);
		nvme_delete_ctrl(&ctrl->ctrl);
	}
	spin_unlock(&rport->lock);
}

static void
nvme_fc_cleanup_for_unload(void)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			nvme_fc_delete_controllers(rport);
		}
	}
}

static void __exit nvme_fc_exit_module(void)
{
	unsigned long flags;
	bool need_cleanup = false;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	nvme_fc_waiting_to_unload = true;
	if (!list_empty(&nvme_fc_lport_list)) {
		need_cleanup = true;
		nvme_fc_cleanup_for_unload();
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);
	if (need_cleanup) {
		pr_info("%s: waiting for ctlr deletes\n", __func__);
		wait_for_completion(&nvme_fc_unload_proceed);
		pr_info("%s: ctrl deletes complete\n", __func__);
	}

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(&fc_class, MKDEV(0, 0));
	class_unregister(&fc_class);
	destroy_workqueue(nvme_fc_wq);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");