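/*
 * NVMe over Fabrics, Fibre Channel (FC) host transport.
 *
 * Provides the FC binding for the NVMe host core: registration of
 * local and remote FC ports on behalf of LLDDs, FC-NVME Link Service
 * (LS) handling to create and tear down associations and connections,
 * and FCP command/response handling for the admin and io queues.
 */
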
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18#include <linux/module.h>
19#include <linux/parser.h>
20#include <uapi/scsi/fc/fc_fs.h>
21#include <uapi/scsi/fc/fc_els.h>
22#include <linux/delay.h>
23
24#include "nvme.h"
25#include "fabrics.h"
26#include <linux/nvme-fc-driver.h>
27#include <linux/nvme-fc.h>
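
/*
 * AEN commands are handled by the transport itself and never reach the
 * block layer, so one slot of the admin queue depth is reserved for
 * them and their command ids start above the blk-mq range.
 */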
37#define NVME_FC_NR_AEN_COMMANDS 1
38#define NVME_FC_AQ_BLKMQ_DEPTH \
39 (NVME_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
40#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
41
42enum nvme_fc_queue_flags {
43 NVME_FC_Q_CONNECTED = (1 << 0),
44};
45
46#define NVMEFC_QUEUE_DELAY 3
47
48struct nvme_fc_queue {
49 struct nvme_fc_ctrl *ctrl;
50 struct device *dev;
51 struct blk_mq_hw_ctx *hctx;
52 void *lldd_handle;
53 int queue_size;
54 size_t cmnd_capsule_len;
55 u32 qnum;
56 u32 rqcnt;
57 u32 seqno;
58
59 u64 connection_id;
60 atomic_t csn;
61
62 unsigned long flags;
63} __aligned(sizeof(u64));
64
65enum nvme_fcop_flags {
66 FCOP_FLAGS_TERMIO = (1 << 0),
67 FCOP_FLAGS_RELEASED = (1 << 1),
68 FCOP_FLAGS_COMPLETE = (1 << 2),
69 FCOP_FLAGS_AEN = (1 << 3),
70};
71
72struct nvmefc_ls_req_op {
73 struct nvmefc_ls_req ls_req;
74
75 struct nvme_fc_rport *rport;
76 struct nvme_fc_queue *queue;
77 struct request *rq;
78 u32 flags;
79
80 int ls_error;
81 struct completion ls_done;
82 struct list_head lsreq_list;
83 bool req_queued;
84};
85
86enum nvme_fcpop_state {
87 FCPOP_STATE_UNINIT = 0,
88 FCPOP_STATE_IDLE = 1,
89 FCPOP_STATE_ACTIVE = 2,
90 FCPOP_STATE_ABORTED = 3,
91 FCPOP_STATE_COMPLETE = 4,
92};
93
94struct nvme_fc_fcp_op {
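	/*
	 * struct nvme_request must remain the first member: the nvme
	 * core locates it from the blk-mq request PDU (nvme_req()).
	 */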
95 struct nvme_request nreq;
103 struct nvmefc_fcp_req fcp_req;
104
105 struct nvme_fc_ctrl *ctrl;
106 struct nvme_fc_queue *queue;
107 struct request *rq;
108
109 atomic_t state;
110 u32 flags;
111 u32 rqno;
112 u32 nents;
113
114 struct nvme_fc_cmd_iu cmd_iu;
115 struct nvme_fc_ersp_iu rsp_iu;
116};
117
118struct nvme_fc_lport {
119 struct nvme_fc_local_port localport;
120
121 struct ida endp_cnt;
122 struct list_head port_list;
123 struct list_head endp_list;
124 struct device *dev;
125 struct nvme_fc_port_template *ops;
126 struct kref ref;
127} __aligned(sizeof(u64));
128
129struct nvme_fc_rport {
130 struct nvme_fc_remote_port remoteport;
131
132 struct list_head endp_list;
133 struct list_head ctrl_list;
134 struct list_head ls_req_list;
135 struct device *dev;
136 struct nvme_fc_lport *lport;
137 spinlock_t lock;
138 struct kref ref;
139} __aligned(sizeof(u64));
140
141enum nvme_fcctrl_flags {
142 FCCTRL_TERMIO = (1 << 0),
143};
144
145struct nvme_fc_ctrl {
146 spinlock_t lock;
147 struct nvme_fc_queue *queues;
148 struct device *dev;
149 struct nvme_fc_lport *lport;
150 struct nvme_fc_rport *rport;
151 u32 cnum;
152
153 u64 association_id;
154
155 struct list_head ctrl_list;
156
157 struct blk_mq_tag_set admin_tag_set;
158 struct blk_mq_tag_set tag_set;
159
160 struct work_struct delete_work;
161 struct delayed_work connect_work;
162
163 struct kref ref;
164 u32 flags;
165 u32 iocnt;
166 wait_queue_head_t ioabort_wait;
167
168 struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
169
170 struct nvme_ctrl ctrl;
171};
172
173static inline struct nvme_fc_ctrl *
174to_fc_ctrl(struct nvme_ctrl *ctrl)
175{
176 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
177}
178
179static inline struct nvme_fc_lport *
180localport_to_lport(struct nvme_fc_local_port *portptr)
181{
182 return container_of(portptr, struct nvme_fc_lport, localport);
183}
184
185static inline struct nvme_fc_rport *
186remoteport_to_rport(struct nvme_fc_remote_port *portptr)
187{
188 return container_of(portptr, struct nvme_fc_rport, remoteport);
189}
190
191static inline struct nvmefc_ls_req_op *
192ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
193{
194 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
195}
196
197static inline struct nvme_fc_fcp_op *
198fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
199{
200 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
201}
202
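/* transport-wide state: lock, registered lport list, port/ctrl ida's */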
208static DEFINE_SPINLOCK(nvme_fc_lock);
209
210static LIST_HEAD(nvme_fc_lport_list);
211static DEFINE_IDA(nvme_fc_local_port_cnt);
212static DEFINE_IDA(nvme_fc_ctrl_cnt);
213
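/* forward declarations used by the port management code below */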
219static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
220static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
221 struct nvme_fc_queue *, unsigned int);
222
223static void
224nvme_fc_free_lport(struct kref *ref)
225{
226 struct nvme_fc_lport *lport =
227 container_of(ref, struct nvme_fc_lport, ref);
228 unsigned long flags;
229
230 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
231 WARN_ON(!list_empty(&lport->endp_list));
232
233
234 spin_lock_irqsave(&nvme_fc_lock, flags);
235 list_del(&lport->port_list);
236 spin_unlock_irqrestore(&nvme_fc_lock, flags);
237
238
239 lport->ops->localport_delete(&lport->localport);
240
241 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
242 ida_destroy(&lport->endp_cnt);
243
244 put_device(lport->dev);
245
246 kfree(lport);
247}
248
249static void
250nvme_fc_lport_put(struct nvme_fc_lport *lport)
251{
252 kref_put(&lport->ref, nvme_fc_free_lport);
253}
254
255static int
256nvme_fc_lport_get(struct nvme_fc_lport *lport)
257{
258 return kref_get_unless_zero(&lport->ref);
259}
260
261
262static struct nvme_fc_lport *
263nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo)
264{
265 struct nvme_fc_lport *lport;
266 unsigned long flags;
267
268 spin_lock_irqsave(&nvme_fc_lock, flags);
269
270 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
271 if (lport->localport.node_name != pinfo->node_name ||
272 lport->localport.port_name != pinfo->port_name)
273 continue;
274
275 if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
276 lport = ERR_PTR(-EEXIST);
277 goto out_done;
278 }
279
280 if (!nvme_fc_lport_get(lport)) {
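			/*
			 * The ref count already hit zero: the lport is
			 * being freed, so act as if it was not found.
			 */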
285 lport = NULL;
286 goto out_done;
287 }
288
289
290
291 lport->localport.port_role = pinfo->port_role;
292 lport->localport.port_id = pinfo->port_id;
293 lport->localport.port_state = FC_OBJSTATE_ONLINE;
294
295 spin_unlock_irqrestore(&nvme_fc_lock, flags);
296
297 return lport;
298 }
299
300 lport = NULL;
301
302out_done:
303 spin_unlock_irqrestore(&nvme_fc_lock, flags);
304
305 return lport;
306}
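
/**
 * nvme_fc_register_localport - transport entry point called by an LLDD
 *   to register the existence of a local NVMe-over-FC host port.
 * @pinfo:    FC port information (node/port names, role, id)
 * @template: LLDD entry points and operational parameters for the port
 * @dev:      physical device node for the port, used for DMA mappings
 *            (may be NULL for virtual/test LLDDs)
 * @portptr:  on success, set to the newly allocated (or resumed)
 *            local port; set to NULL on failure
 *
 * Returns 0 on success or a negative errno on failure.
 */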
325int
326nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
327 struct nvme_fc_port_template *template,
328 struct device *dev,
329 struct nvme_fc_local_port **portptr)
330{
331 struct nvme_fc_lport *newrec;
332 unsigned long flags;
333 int ret, idx;
334
335 if (!template->localport_delete || !template->remoteport_delete ||
336 !template->ls_req || !template->fcp_io ||
337 !template->ls_abort || !template->fcp_abort ||
338 !template->max_hw_queues || !template->max_sgl_segments ||
339 !template->max_dif_sgl_segments || !template->dma_boundary) {
340 ret = -EINVAL;
341 goto out_reghost_failed;
342 }
343
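	/*
	 * Look for a previously registered, since deleted, lport with the
	 * same node/port name.  If one exists, resume it instead of
	 * allocating a new structure.
	 */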
351 newrec = nvme_fc_attach_to_unreg_lport(pinfo);
352
353
354 if (IS_ERR(newrec)) {
355 ret = PTR_ERR(newrec);
356 goto out_reghost_failed;
357
358
359 } else if (newrec) {
360 *portptr = &newrec->localport;
361 return 0;
362 }
363
364
365
366 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
367 GFP_KERNEL);
368 if (!newrec) {
369 ret = -ENOMEM;
370 goto out_reghost_failed;
371 }
372
373 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
374 if (idx < 0) {
375 ret = -ENOSPC;
376 goto out_fail_kfree;
377 }
378
379 if (!get_device(dev) && dev) {
380 ret = -ENODEV;
381 goto out_ida_put;
382 }
383
384 INIT_LIST_HEAD(&newrec->port_list);
385 INIT_LIST_HEAD(&newrec->endp_list);
386 kref_init(&newrec->ref);
387 newrec->ops = template;
388 newrec->dev = dev;
389 ida_init(&newrec->endp_cnt);
390 newrec->localport.private = &newrec[1];
391 newrec->localport.node_name = pinfo->node_name;
392 newrec->localport.port_name = pinfo->port_name;
393 newrec->localport.port_role = pinfo->port_role;
394 newrec->localport.port_id = pinfo->port_id;
395 newrec->localport.port_state = FC_OBJSTATE_ONLINE;
396 newrec->localport.port_num = idx;
397
398 spin_lock_irqsave(&nvme_fc_lock, flags);
399 list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
400 spin_unlock_irqrestore(&nvme_fc_lock, flags);
401
402 if (dev)
403 dma_set_seg_boundary(dev, template->dma_boundary);
404
405 *portptr = &newrec->localport;
406 return 0;
407
408out_ida_put:
409 ida_simple_remove(&nvme_fc_local_port_cnt, idx);
410out_fail_kfree:
411 kfree(newrec);
412out_reghost_failed:
413 *portptr = NULL;
414
415 return ret;
416}
417EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
418
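/**
 * nvme_fc_unregister_localport - transport entry point called by an LLDD
 *   to remove a previously registered local NVMe-over-FC host port.
 * @portptr: local port returned by nvme_fc_register_localport()
 *
 * Marks the port deleted and drops the registration reference; the port
 * is freed (and the LLDD's localport_delete callback invoked) once the
 * last reference goes away.
 *
 * Returns 0 on success or a negative errno on failure.
 */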
430int
431nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
432{
433 struct nvme_fc_lport *lport = localport_to_lport(portptr);
434 unsigned long flags;
435
436 if (!portptr)
437 return -EINVAL;
438
439 spin_lock_irqsave(&nvme_fc_lock, flags);
440
441 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
442 spin_unlock_irqrestore(&nvme_fc_lock, flags);
443 return -EINVAL;
444 }
445 portptr->port_state = FC_OBJSTATE_DELETED;
446
447 spin_unlock_irqrestore(&nvme_fc_lock, flags);
448
449 nvme_fc_lport_put(lport);
450
451 return 0;
452}
453EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
454
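/**
 * nvme_fc_register_remoteport - transport entry point called by an LLDD
 *   to register a remote FC port (the NVMe subsystem side) reachable
 *   through the given local port.
 * @localport: local port the remote port was discovered on
 * @pinfo:     FC port information for the remote port
 * @portptr:   on success, set to the new remote port; NULL on failure
 *
 * Returns 0 on success or a negative errno on failure.
 */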
471int
472nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
473 struct nvme_fc_port_info *pinfo,
474 struct nvme_fc_remote_port **portptr)
475{
476 struct nvme_fc_lport *lport = localport_to_lport(localport);
477 struct nvme_fc_rport *newrec;
478 unsigned long flags;
479 int ret, idx;
480
481 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
482 GFP_KERNEL);
483 if (!newrec) {
484 ret = -ENOMEM;
485 goto out_reghost_failed;
486 }
487
488 if (!nvme_fc_lport_get(lport)) {
489 ret = -ESHUTDOWN;
490 goto out_kfree_rport;
491 }
492
493 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
494 if (idx < 0) {
495 ret = -ENOSPC;
496 goto out_lport_put;
497 }
498
499 INIT_LIST_HEAD(&newrec->endp_list);
500 INIT_LIST_HEAD(&newrec->ctrl_list);
501 INIT_LIST_HEAD(&newrec->ls_req_list);
502 kref_init(&newrec->ref);
503 spin_lock_init(&newrec->lock);
504 newrec->remoteport.localport = &lport->localport;
505 newrec->dev = lport->dev;
506 newrec->lport = lport;
507 newrec->remoteport.private = &newrec[1];
508 newrec->remoteport.port_role = pinfo->port_role;
509 newrec->remoteport.node_name = pinfo->node_name;
510 newrec->remoteport.port_name = pinfo->port_name;
511 newrec->remoteport.port_id = pinfo->port_id;
512 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
513 newrec->remoteport.port_num = idx;
514
515 spin_lock_irqsave(&nvme_fc_lock, flags);
516 list_add_tail(&newrec->endp_list, &lport->endp_list);
517 spin_unlock_irqrestore(&nvme_fc_lock, flags);
518
519 *portptr = &newrec->remoteport;
520 return 0;
521
522out_lport_put:
523 nvme_fc_lport_put(lport);
524out_kfree_rport:
525 kfree(newrec);
526out_reghost_failed:
527 *portptr = NULL;
528 return ret;
529}
530EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
531
532static void
533nvme_fc_free_rport(struct kref *ref)
534{
535 struct nvme_fc_rport *rport =
536 container_of(ref, struct nvme_fc_rport, ref);
537 struct nvme_fc_lport *lport =
538 localport_to_lport(rport->remoteport.localport);
539 unsigned long flags;
540
541 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
542 WARN_ON(!list_empty(&rport->ctrl_list));
543
544
545 spin_lock_irqsave(&nvme_fc_lock, flags);
546 list_del(&rport->endp_list);
547 spin_unlock_irqrestore(&nvme_fc_lock, flags);
548
549
550 lport->ops->remoteport_delete(&rport->remoteport);
551
552 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
553
554 kfree(rport);
555
556 nvme_fc_lport_put(lport);
557}
558
559static void
560nvme_fc_rport_put(struct nvme_fc_rport *rport)
561{
562 kref_put(&rport->ref, nvme_fc_free_rport);
563}
564
565static int
566nvme_fc_rport_get(struct nvme_fc_rport *rport)
567{
568 return kref_get_unless_zero(&rport->ref);
569}
570
571static int
572nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
573{
574 struct nvmefc_ls_req_op *lsop;
575 unsigned long flags;
576
577restart:
578 spin_lock_irqsave(&rport->lock, flags);
579
580 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
581 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
582 lsop->flags |= FCOP_FLAGS_TERMIO;
583 spin_unlock_irqrestore(&rport->lock, flags);
584 rport->lport->ops->ls_abort(&rport->lport->localport,
585 &rport->remoteport,
586 &lsop->ls_req);
587 goto restart;
588 }
589 }
590 spin_unlock_irqrestore(&rport->lock, flags);
591
592 return 0;
593}
594
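/**
 * nvme_fc_unregister_remoteport - transport entry point called by an LLDD
 *   to remove a previously registered remote FC port.
 * @portptr: remote port returned by nvme_fc_register_remoteport()
 *
 * Marks the port deleted, tears down the controllers associated with it,
 * aborts any outstanding LS requests, and drops the registration
 * reference.
 *
 * Returns 0 on success or a negative errno on failure.
 */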
606int
607nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
608{
609 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
610 struct nvme_fc_ctrl *ctrl;
611 unsigned long flags;
612
613 if (!portptr)
614 return -EINVAL;
615
616 spin_lock_irqsave(&rport->lock, flags);
617
618 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
619 spin_unlock_irqrestore(&rport->lock, flags);
620 return -EINVAL;
621 }
622 portptr->port_state = FC_OBJSTATE_DELETED;
623
624
625 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
626 __nvme_fc_del_ctrl(ctrl);
627
628 spin_unlock_irqrestore(&rport->lock, flags);
629
630 nvme_fc_abort_lsops(rport);
631
632 nvme_fc_rport_put(rport);
633 return 0;
634}
635EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
636
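/*
 * DMA wrappers: some LLDDs (software/test drivers such as fcloop)
 * register with a NULL struct device.  These helpers skip the DMA API
 * in that case so the transport works with and without a real device.
 */
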
656static inline dma_addr_t
657fc_dma_map_single(struct device *dev, void *ptr, size_t size,
658 enum dma_data_direction dir)
659{
660 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
661}
662
663static inline int
664fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
665{
666 return dev ? dma_mapping_error(dev, dma_addr) : 0;
667}
668
669static inline void
670fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
671 enum dma_data_direction dir)
672{
673 if (dev)
674 dma_unmap_single(dev, addr, size, dir);
675}
676
677static inline void
678fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
679 enum dma_data_direction dir)
680{
681 if (dev)
682 dma_sync_single_for_cpu(dev, addr, size, dir);
683}
684
685static inline void
686fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
687 enum dma_data_direction dir)
688{
689 if (dev)
690 dma_sync_single_for_device(dev, addr, size, dir);
691}
692
693
694static int
695fc_map_sg(struct scatterlist *sg, int nents)
696{
697 struct scatterlist *s;
698 int i;
699
700 WARN_ON(nents == 0 || sg[0].length == 0);
701
702 for_each_sg(sg, s, nents, i) {
703 s->dma_address = 0L;
704#ifdef CONFIG_NEED_SG_DMA_LENGTH
705 s->dma_length = s->length;
706#endif
707 }
708 return nents;
709}
710
711static inline int
712fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
713 enum dma_data_direction dir)
714{
715 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
716}
717
718static inline void
719fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
720 enum dma_data_direction dir)
721{
722 if (dev)
723 dma_unmap_sg(dev, sg, nents, dir);
724}
725
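/* FC-NVME Link Service (LS) request handling */
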
729static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
730static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
731
732
733static void
734__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
735{
736 struct nvme_fc_rport *rport = lsop->rport;
737 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
738 unsigned long flags;
739
740 spin_lock_irqsave(&rport->lock, flags);
741
742 if (!lsop->req_queued) {
743 spin_unlock_irqrestore(&rport->lock, flags);
744 return;
745 }
746
747 list_del(&lsop->lsreq_list);
748
749 lsop->req_queued = false;
750
751 spin_unlock_irqrestore(&rport->lock, flags);
752
753 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
754 (lsreq->rqstlen + lsreq->rsplen),
755 DMA_BIDIRECTIONAL);
756
757 nvme_fc_rport_put(rport);
758}
759
760static int
761__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
762 struct nvmefc_ls_req_op *lsop,
763 void (*done)(struct nvmefc_ls_req *req, int status))
764{
765 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
766 unsigned long flags;
767 int ret = 0;
768
769 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
770 return -ECONNREFUSED;
771
772 if (!nvme_fc_rport_get(rport))
773 return -ESHUTDOWN;
774
775 lsreq->done = done;
776 lsop->rport = rport;
777 lsop->req_queued = false;
778 INIT_LIST_HEAD(&lsop->lsreq_list);
779 init_completion(&lsop->ls_done);
780
781 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
782 lsreq->rqstlen + lsreq->rsplen,
783 DMA_BIDIRECTIONAL);
784 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
785 ret = -EFAULT;
786 goto out_putrport;
787 }
788 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
789
790 spin_lock_irqsave(&rport->lock, flags);
791
792 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
793
794 lsop->req_queued = true;
795
796 spin_unlock_irqrestore(&rport->lock, flags);
797
798 ret = rport->lport->ops->ls_req(&rport->lport->localport,
799 &rport->remoteport, lsreq);
800 if (ret)
801 goto out_unlink;
802
803 return 0;
804
805out_unlink:
806 lsop->ls_error = ret;
807 spin_lock_irqsave(&rport->lock, flags);
808 lsop->req_queued = false;
809 list_del(&lsop->lsreq_list);
810 spin_unlock_irqrestore(&rport->lock, flags);
811 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
812 (lsreq->rqstlen + lsreq->rsplen),
813 DMA_BIDIRECTIONAL);
814out_putrport:
815 nvme_fc_rport_put(rport);
816
817 return ret;
818}
819
820static void
821nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
822{
823 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
824
825 lsop->ls_error = status;
826 complete(&lsop->ls_done);
827}
828
829static int
830nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
831{
832 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
833 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
834 int ret;
835
836 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
837
838 if (!ret) {
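		/*
		 * Wait uninterruptibly and without our own timeout: the
		 * lsop must stay valid until the LLDD calls back, and the
		 * LLDD owns the request timeout (lsreq->timeout).
		 */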
845 wait_for_completion(&lsop->ls_done);
846
847 __nvme_fc_finish_ls_req(lsop);
848
849 ret = lsop->ls_error;
850 }
851
852 if (ret)
853 return ret;
854
855
856 if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
857 return -ENXIO;
858
859 return 0;
860}
861
862static int
863nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
864 struct nvmefc_ls_req_op *lsop,
865 void (*done)(struct nvmefc_ls_req *req, int status))
866{
867
868
869 return __nvme_fc_send_ls_req(rport, lsop, done);
870}
871
872
873enum {
874 VERR_NO_ERROR = 0,
875 VERR_LSACC = 1,
876 VERR_LSDESC_RQST = 2,
877 VERR_LSDESC_RQST_LEN = 3,
878 VERR_ASSOC_ID = 4,
879 VERR_ASSOC_ID_LEN = 5,
880 VERR_CONN_ID = 6,
881 VERR_CONN_ID_LEN = 7,
882 VERR_CR_ASSOC = 8,
883 VERR_CR_ASSOC_ACC_LEN = 9,
884 VERR_CR_CONN = 10,
885 VERR_CR_CONN_ACC_LEN = 11,
886 VERR_DISCONN = 12,
887 VERR_DISCONN_ACC_LEN = 13,
888};
889
890static char *validation_errors[] = {
891 "OK",
892 "Not LS_ACC",
893 "Not LSDESC_RQST",
894 "Bad LSDESC_RQST Length",
895 "Not Association ID",
896 "Bad Association ID Length",
897 "Not Connection ID",
898 "Bad Connection ID Length",
899 "Not CR_ASSOC Rqst",
900 "Bad CR_ASSOC ACC Length",
901 "Not CR_CONN Rqst",
902 "Bad CR_CONN ACC Length",
903 "Not Disconnect Rqst",
904 "Bad Disconnect ACC Length",
905};
906
907static int
908nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
909 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
910{
911 struct nvmefc_ls_req_op *lsop;
912 struct nvmefc_ls_req *lsreq;
913 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
914 struct fcnvme_ls_cr_assoc_acc *assoc_acc;
915 int ret, fcret = 0;
916
917 lsop = kzalloc((sizeof(*lsop) +
918 ctrl->lport->ops->lsrqst_priv_sz +
919 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
920 if (!lsop) {
921 ret = -ENOMEM;
922 goto out_no_memory;
923 }
924 lsreq = &lsop->ls_req;
925
926 lsreq->private = (void *)&lsop[1];
927 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
928 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
929 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
930
931 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
932 assoc_rqst->desc_list_len =
933 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
934
935 assoc_rqst->assoc_cmd.desc_tag =
936 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
937 assoc_rqst->assoc_cmd.desc_len =
938 fcnvme_lsdesc_len(
939 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
940
941 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
942 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
943
944 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
945 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
946 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
947 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
948 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
949 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
950
951 lsop->queue = queue;
952 lsreq->rqstaddr = assoc_rqst;
953 lsreq->rqstlen = sizeof(*assoc_rqst);
954 lsreq->rspaddr = assoc_acc;
955 lsreq->rsplen = sizeof(*assoc_acc);
956 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
957
958 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
959 if (ret)
960 goto out_free_buffer;
961
962
963
964
965 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
966 fcret = VERR_LSACC;
967 else if (assoc_acc->hdr.desc_list_len !=
968 fcnvme_lsdesc_len(
969 sizeof(struct fcnvme_ls_cr_assoc_acc)))
970 fcret = VERR_CR_ASSOC_ACC_LEN;
971 else if (assoc_acc->hdr.rqst.desc_tag !=
972 cpu_to_be32(FCNVME_LSDESC_RQST))
973 fcret = VERR_LSDESC_RQST;
974 else if (assoc_acc->hdr.rqst.desc_len !=
975 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
976 fcret = VERR_LSDESC_RQST_LEN;
977 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
978 fcret = VERR_CR_ASSOC;
979 else if (assoc_acc->associd.desc_tag !=
980 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
981 fcret = VERR_ASSOC_ID;
982 else if (assoc_acc->associd.desc_len !=
983 fcnvme_lsdesc_len(
984 sizeof(struct fcnvme_lsdesc_assoc_id)))
985 fcret = VERR_ASSOC_ID_LEN;
986 else if (assoc_acc->connectid.desc_tag !=
987 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
988 fcret = VERR_CONN_ID;
989 else if (assoc_acc->connectid.desc_len !=
990 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
991 fcret = VERR_CONN_ID_LEN;
992
993 if (fcret) {
994 ret = -EBADF;
995 dev_err(ctrl->dev,
996 "q %d connect failed: %s\n",
997 queue->qnum, validation_errors[fcret]);
998 } else {
999 ctrl->association_id =
1000 be64_to_cpu(assoc_acc->associd.association_id);
1001 queue->connection_id =
1002 be64_to_cpu(assoc_acc->connectid.connection_id);
1003 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1004 }
1005
1006out_free_buffer:
1007 kfree(lsop);
1008out_no_memory:
1009 if (ret)
1010 dev_err(ctrl->dev,
1011 "queue %d connect admin queue failed (%d).\n",
1012 queue->qnum, ret);
1013 return ret;
1014}
1015
1016static int
1017nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1018 u16 qsize, u16 ersp_ratio)
1019{
1020 struct nvmefc_ls_req_op *lsop;
1021 struct nvmefc_ls_req *lsreq;
1022 struct fcnvme_ls_cr_conn_rqst *conn_rqst;
1023 struct fcnvme_ls_cr_conn_acc *conn_acc;
1024 int ret, fcret = 0;
1025
1026 lsop = kzalloc((sizeof(*lsop) +
1027 ctrl->lport->ops->lsrqst_priv_sz +
1028 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
1029 if (!lsop) {
1030 ret = -ENOMEM;
1031 goto out_no_memory;
1032 }
1033 lsreq = &lsop->ls_req;
1034
1035 lsreq->private = (void *)&lsop[1];
1036 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
1037 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1038 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
1039
1040 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
1041 conn_rqst->desc_list_len = cpu_to_be32(
1042 sizeof(struct fcnvme_lsdesc_assoc_id) +
1043 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1044
1045 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1046 conn_rqst->associd.desc_len =
1047 fcnvme_lsdesc_len(
1048 sizeof(struct fcnvme_lsdesc_assoc_id));
1049 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1050 conn_rqst->connect_cmd.desc_tag =
1051 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
1052 conn_rqst->connect_cmd.desc_len =
1053 fcnvme_lsdesc_len(
1054 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1055 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1056 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
1057 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
1058
1059 lsop->queue = queue;
1060 lsreq->rqstaddr = conn_rqst;
1061 lsreq->rqstlen = sizeof(*conn_rqst);
1062 lsreq->rspaddr = conn_acc;
1063 lsreq->rsplen = sizeof(*conn_acc);
1064 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1065
1066 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1067 if (ret)
1068 goto out_free_buffer;
1069
1070
1071
1072
1073 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1074 fcret = VERR_LSACC;
1075 else if (conn_acc->hdr.desc_list_len !=
1076 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1077 fcret = VERR_CR_CONN_ACC_LEN;
1078 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1079 fcret = VERR_LSDESC_RQST;
1080 else if (conn_acc->hdr.rqst.desc_len !=
1081 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1082 fcret = VERR_LSDESC_RQST_LEN;
1083 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1084 fcret = VERR_CR_CONN;
1085 else if (conn_acc->connectid.desc_tag !=
1086 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1087 fcret = VERR_CONN_ID;
1088 else if (conn_acc->connectid.desc_len !=
1089 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1090 fcret = VERR_CONN_ID_LEN;
1091
1092 if (fcret) {
1093 ret = -EBADF;
1094 dev_err(ctrl->dev,
1095 "q %d connect failed: %s\n",
1096 queue->qnum, validation_errors[fcret]);
1097 } else {
1098 queue->connection_id =
1099 be64_to_cpu(conn_acc->connectid.connection_id);
1100 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1101 }
1102
1103out_free_buffer:
1104 kfree(lsop);
1105out_no_memory:
1106 if (ret)
1107 dev_err(ctrl->dev,
1108 "queue %d connect command failed (%d).\n",
1109 queue->qnum, ret);
1110 return ret;
1111}
1112
1113static void
1114nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1115{
1116 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1117
1118 __nvme_fc_finish_ls_req(lsop);
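
	/* the initiator takes no action on the disconnect response status */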
1122 kfree(lsop);
1123}
1124
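/*
 * Send a Disconnect Association LS to the target.  Terminating the
 * association implicitly terminates all of its connections (admin and
 * io queues).  The LS is sent asynchronously and its response is not
 * acted upon: connectivity may already be lost, so the local side
 * considers the association gone whether or not a reply arrives.
 */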
1142static void
1143nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1144{
1145 struct fcnvme_ls_disconnect_rqst *discon_rqst;
1146 struct fcnvme_ls_disconnect_acc *discon_acc;
1147 struct nvmefc_ls_req_op *lsop;
1148 struct nvmefc_ls_req *lsreq;
1149 int ret;
1150
1151 lsop = kzalloc((sizeof(*lsop) +
1152 ctrl->lport->ops->lsrqst_priv_sz +
1153 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1154 GFP_KERNEL);
1155 if (!lsop)
1156
1157 return;
1158
1159 lsreq = &lsop->ls_req;
1160
1161 lsreq->private = (void *)&lsop[1];
1162 discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1163 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1164 discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1165
1166 discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1167 discon_rqst->desc_list_len = cpu_to_be32(
1168 sizeof(struct fcnvme_lsdesc_assoc_id) +
1169 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1170
1171 discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1172 discon_rqst->associd.desc_len =
1173 fcnvme_lsdesc_len(
1174 sizeof(struct fcnvme_lsdesc_assoc_id));
1175
1176 discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1177
1178 discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1179 FCNVME_LSDESC_DISCONN_CMD);
1180 discon_rqst->discon_cmd.desc_len =
1181 fcnvme_lsdesc_len(
1182 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1183 discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1184 discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1185
1186 lsreq->rqstaddr = discon_rqst;
1187 lsreq->rqstlen = sizeof(*discon_rqst);
1188 lsreq->rspaddr = discon_acc;
1189 lsreq->rsplen = sizeof(*discon_acc);
1190 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1191
1192 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1193 nvme_fc_disconnect_assoc_done);
1194 if (ret)
1195 kfree(lsop);
1196
1197
1198 ctrl->association_id = 0;
1199}
1200
1201
1202
1203
1204static void __nvme_fc_final_op_cleanup(struct request *rq);
1205static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1206
1207static int
1208nvme_fc_reinit_request(void *data, struct request *rq)
1209{
1210 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1211 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1212
1213 memset(cmdiu, 0, sizeof(*cmdiu));
1214 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1215 cmdiu->fc_id = NVME_CMD_FC_ID;
1216 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1217 memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
1218
1219 return 0;
1220}
1221
1222static void
1223__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1224 struct nvme_fc_fcp_op *op)
1225{
1226 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1227 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1228 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1229 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1230
1231 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1232}
1233
1234static void
1235nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1236 unsigned int hctx_idx)
1237{
1238 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1239
1240 return __nvme_fc_exit_request(set->driver_data, op);
1241}
1242
1243static int
1244__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1245{
1246 int state;
1247
1248 state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1249 if (state != FCPOP_STATE_ACTIVE) {
1250 atomic_set(&op->state, state);
1251 return -ECANCELED;
1252 }
1253
1254 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1255 &ctrl->rport->remoteport,
1256 op->queue->lldd_handle,
1257 &op->fcp_req);
1258
1259 return 0;
1260}
1261
1262static void
1263nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1264{
1265 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1266 unsigned long flags;
1267 int i, ret;
1268
1269 for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1270 if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
1271 continue;
1272
1273 spin_lock_irqsave(&ctrl->lock, flags);
1274 if (ctrl->flags & FCCTRL_TERMIO) {
1275 ctrl->iocnt++;
1276 aen_op->flags |= FCOP_FLAGS_TERMIO;
1277 }
1278 spin_unlock_irqrestore(&ctrl->lock, flags);
1279
1280 ret = __nvme_fc_abort_op(ctrl, aen_op);
1281 if (ret) {
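			/*
			 * The abort failed because the op was no longer
			 * active (its completion is running in parallel).
			 * Back out the TERMIO accounting and let the
			 * normal completion path finish the op.
			 */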
1289 spin_lock_irqsave(&ctrl->lock, flags);
1290 if (ctrl->flags & FCCTRL_TERMIO)
1291 ctrl->iocnt--;
1292 aen_op->flags &= ~FCOP_FLAGS_TERMIO;
1293 spin_unlock_irqrestore(&ctrl->lock, flags);
1294 return;
1295 }
1296 }
1297}
1298
1299static inline int
1300__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1301 struct nvme_fc_fcp_op *op)
1302{
1303 unsigned long flags;
1304 bool complete_rq = false;
1305
1306 spin_lock_irqsave(&ctrl->lock, flags);
1307 if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1308 if (ctrl->flags & FCCTRL_TERMIO) {
1309 if (!--ctrl->iocnt)
1310 wake_up(&ctrl->ioabort_wait);
1311 }
1312 }
1313 if (op->flags & FCOP_FLAGS_RELEASED)
1314 complete_rq = true;
1315 else
1316 op->flags |= FCOP_FLAGS_COMPLETE;
1317 spin_unlock_irqrestore(&ctrl->lock, flags);
1318
1319 return complete_rq;
1320}
1321
1322static void
1323nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1324{
1325 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1326 struct request *rq = op->rq;
1327 struct nvmefc_fcp_req *freq = &op->fcp_req;
1328 struct nvme_fc_ctrl *ctrl = op->ctrl;
1329 struct nvme_fc_queue *queue = op->queue;
1330 struct nvme_completion *cqe = &op->rsp_iu.cqe;
1331 struct nvme_command *sqe = &op->cmd_iu.sqe;
1332 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1333 union nvme_result result;
1334 bool complete_rq, terminate_assoc = true;
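
	/*
	 * The LLDD is done with the exchange; build the completion for
	 * the nvme core.  On a transport abort or error, fabricate an
	 * error status.  On success, either fabricate a good CQE (no
	 * response payload, or the short all-zeros response) or use the
	 * CQE embedded in a full ERSP IU after validating it.  Any
	 * transport-detected inconsistency leaves terminate_assoc set
	 * and triggers nvme_fc_error_recovery() below.
	 */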
1373 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1374 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1375
1376 if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
1377 status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
1378 else if (freq->status)
1379 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1380
1381
1382
1383
1384
1385
1386 if (status)
1387 goto done;
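
	/*
	 * The command completed successfully at the transport level;
	 * extract or fabricate the CQE status and result based on the
	 * size of the response that was received.
	 */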
1396 switch (freq->rcv_rsplen) {
1397
1398 case 0:
1399 case NVME_FC_SIZEOF_ZEROS_RSP:
1400
1401
1402
1403
1404
1405 if (freq->transferred_length !=
1406 be32_to_cpu(op->cmd_iu.data_len)) {
1407 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1408 goto done;
1409 }
1410 result.u64 = 0;
1411 break;
1412
1413 case sizeof(struct nvme_fc_ersp_iu):
1414
1415
1416
1417
1418 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1419 (freq->rcv_rsplen / 4) ||
1420 be32_to_cpu(op->rsp_iu.xfrd_len) !=
1421 freq->transferred_length ||
1422 op->rsp_iu.status_code ||
1423 sqe->common.command_id != cqe->command_id)) {
1424 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1425 goto done;
1426 }
1427 result = cqe->result;
1428 status = cqe->status;
1429 break;
1430
1431 default:
1432 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1433 goto done;
1434 }
1435
1436 terminate_assoc = false;
1437
1438done:
1439 if (op->flags & FCOP_FLAGS_AEN) {
1440 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1441 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1442 atomic_set(&op->state, FCPOP_STATE_IDLE);
1443 op->flags = FCOP_FLAGS_AEN;
1444 nvme_fc_ctrl_put(ctrl);
1445 goto check_error;
1446 }
1447
1448 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1449 if (!complete_rq) {
1450 if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1451 status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
1452 if (blk_queue_dying(rq->q))
1453 status |= cpu_to_le16(NVME_SC_DNR << 1);
1454 }
1455 nvme_end_request(rq, status, result);
1456 } else
1457 __nvme_fc_final_op_cleanup(rq);
1458
1459check_error:
1460 if (terminate_assoc)
1461 nvme_fc_error_recovery(ctrl, "transport detected io error");
1462}
1463
1464static int
1465__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1466 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1467 struct request *rq, u32 rqno)
1468{
1469 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1470 int ret = 0;
1471
1472 memset(op, 0, sizeof(*op));
1473 op->fcp_req.cmdaddr = &op->cmd_iu;
1474 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1475 op->fcp_req.rspaddr = &op->rsp_iu;
1476 op->fcp_req.rsplen = sizeof(op->rsp_iu);
1477 op->fcp_req.done = nvme_fc_fcpio_done;
1478 op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
1479 op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
1480 op->ctrl = ctrl;
1481 op->queue = queue;
1482 op->rq = rq;
1483 op->rqno = rqno;
1484
1485 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1486 cmdiu->fc_id = NVME_CMD_FC_ID;
1487 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1488
1489 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1490 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1491 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1492 dev_err(ctrl->dev,
1493 "FCP Op failed - cmdiu dma mapping failed.\n");
1494		ret = -EFAULT;
1495 goto out_on_error;
1496 }
1497
1498 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1499 &op->rsp_iu, sizeof(op->rsp_iu),
1500 DMA_FROM_DEVICE);
1501 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1502 dev_err(ctrl->dev,
1503 "FCP Op failed - rspiu dma mapping failed.\n");
1504		ret = -EFAULT;
1505 }
1506
1507 atomic_set(&op->state, FCPOP_STATE_IDLE);
1508out_on_error:
1509 return ret;
1510}
1511
1512static int
1513nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1514 unsigned int hctx_idx, unsigned int numa_node)
1515{
1516 struct nvme_fc_ctrl *ctrl = set->driver_data;
1517 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1518 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
1519 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
1520
1521 return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1522}
1523
1524static int
1525nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1526{
1527 struct nvme_fc_fcp_op *aen_op;
1528 struct nvme_fc_cmd_iu *cmdiu;
1529 struct nvme_command *sqe;
1530 void *private;
1531 int i, ret;
1532
1533 aen_op = ctrl->aen_ops;
1534 for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1535 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1536 GFP_KERNEL);
1537 if (!private)
1538 return -ENOMEM;
1539
1540 cmdiu = &aen_op->cmd_iu;
1541 sqe = &cmdiu->sqe;
1542 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1543 aen_op, (struct request *)NULL,
1544 (AEN_CMDID_BASE + i));
1545 if (ret) {
1546 kfree(private);
1547 return ret;
1548 }
1549
1550 aen_op->flags = FCOP_FLAGS_AEN;
1551 aen_op->fcp_req.first_sgl = NULL;
1552 aen_op->fcp_req.private = private;
1553
1554 memset(sqe, 0, sizeof(*sqe));
1555 sqe->common.opcode = nvme_admin_async_event;
1556
1557 sqe->common.command_id = AEN_CMDID_BASE + i;
1558 }
1559 return 0;
1560}
1561
1562static void
1563nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1564{
1565 struct nvme_fc_fcp_op *aen_op;
1566 int i;
1567
1568 aen_op = ctrl->aen_ops;
1569 for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1570 if (!aen_op->fcp_req.private)
1571 continue;
1572
1573 __nvme_fc_exit_request(ctrl, aen_op);
1574
1575 kfree(aen_op->fcp_req.private);
1576 aen_op->fcp_req.private = NULL;
1577 }
1578}
1579
1580static inline void
1581__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1582 unsigned int qidx)
1583{
1584 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1585
1586 hctx->driver_data = queue;
1587 queue->hctx = hctx;
1588}
1589
1590static int
1591nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1592 unsigned int hctx_idx)
1593{
1594 struct nvme_fc_ctrl *ctrl = data;
1595
1596 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1597
1598 return 0;
1599}
1600
1601static int
1602nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1603 unsigned int hctx_idx)
1604{
1605 struct nvme_fc_ctrl *ctrl = data;
1606
1607 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1608
1609 return 0;
1610}
1611
1612static void
1613nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
1614{
1615 struct nvme_fc_queue *queue;
1616
1617 queue = &ctrl->queues[idx];
1618 memset(queue, 0, sizeof(*queue));
1619 queue->ctrl = ctrl;
1620 queue->qnum = idx;
1621 atomic_set(&queue->csn, 1);
1622 queue->dev = ctrl->dev;
1623
1624 if (idx > 0)
1625 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1626 else
1627 queue->cmnd_capsule_len = sizeof(struct nvme_command);
1628
1629 queue->queue_size = queue_size;
1641}
1642
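/*
 * Mark a queue as no longer connected.  The implementation never
 * disconnects an individual queue; the whole association is torn down
 * instead, so no per-queue Disconnect LS is sent here.
 */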
1651static void
1652nvme_fc_free_queue(struct nvme_fc_queue *queue)
1653{
1654 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1655 return;
1663 queue->connection_id = 0;
1664 clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1665}
1666
1667static void
1668__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1669 struct nvme_fc_queue *queue, unsigned int qidx)
1670{
1671 if (ctrl->lport->ops->delete_queue)
1672 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1673 queue->lldd_handle);
1674 queue->lldd_handle = NULL;
1675}
1676
1677static void
1678nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1679{
1680 int i;
1681
1682 for (i = 1; i < ctrl->ctrl.queue_count; i++)
1683 nvme_fc_free_queue(&ctrl->queues[i]);
1684}
1685
1686static int
1687__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1688 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1689{
1690 int ret = 0;
1691
1692 queue->lldd_handle = NULL;
1693 if (ctrl->lport->ops->create_queue)
1694 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1695 qidx, qsize, &queue->lldd_handle);
1696
1697 return ret;
1698}
1699
1700static void
1701nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1702{
1703 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
1704 int i;
1705
1706 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
1707 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1708}
1709
1710static int
1711nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1712{
1713 struct nvme_fc_queue *queue = &ctrl->queues[1];
1714 int i, ret;
1715
1716 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
1717 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1718 if (ret)
1719 goto delete_queues;
1720 }
1721
1722 return 0;
1723
1724delete_queues:
1725 for (; i >= 0; i--)
1726 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1727 return ret;
1728}
1729
1730static int
1731nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1732{
1733 int i, ret = 0;
1734
1735 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
1736 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1737 (qsize / 5));
1738 if (ret)
1739 break;
1740 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1741 if (ret)
1742 break;
1743 }
1744
1745 return ret;
1746}
1747
1748static void
1749nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
1750{
1751 int i;
1752
1753 for (i = 1; i < ctrl->ctrl.queue_count; i++)
1754 nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
1755}
1756
1757static void
1758nvme_fc_ctrl_free(struct kref *ref)
1759{
1760 struct nvme_fc_ctrl *ctrl =
1761 container_of(ref, struct nvme_fc_ctrl, ref);
1762 unsigned long flags;
1763
1764 if (ctrl->ctrl.tagset) {
1765 blk_cleanup_queue(ctrl->ctrl.connect_q);
1766 blk_mq_free_tag_set(&ctrl->tag_set);
1767 }
1768
1769
1770 spin_lock_irqsave(&ctrl->rport->lock, flags);
1771 list_del(&ctrl->ctrl_list);
1772 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
1773
1774 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
1775 blk_cleanup_queue(ctrl->ctrl.admin_q);
1776 blk_mq_free_tag_set(&ctrl->admin_tag_set);
1777
1778 kfree(ctrl->queues);
1779
1780 put_device(ctrl->dev);
1781 nvme_fc_rport_put(ctrl->rport);
1782
1783 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
1784 if (ctrl->ctrl.opts)
1785 nvmf_free_options(ctrl->ctrl.opts);
1786 kfree(ctrl);
1787}
1788
1789static void
1790nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
1791{
1792 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
1793}
1794
1795static int
1796nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
1797{
1798 return kref_get_unless_zero(&ctrl->ref);
1799}
1800
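/*
 * Called by the nvme core once all core-level references to the
 * controller are gone; drop the transport's reference.
 */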
1805static void
1806nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
1807{
1808 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
1809
1810 WARN_ON(nctrl != &ctrl->ctrl);
1811
1812 nvme_fc_ctrl_put(ctrl);
1813}
1814
1815static void
1816nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
1817{
1818
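	/* only act on the first error while the controller is live */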
1819 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
1820 return;
1821
1822 dev_warn(ctrl->ctrl.device,
1823 "NVME-FC{%d}: transport association error detected: %s\n",
1824 ctrl->cnum, errmsg);
1825 dev_warn(ctrl->ctrl.device,
1826 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
1827
1828 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
1829 dev_err(ctrl->ctrl.device,
1830 "NVME-FC{%d}: error_recovery: Couldn't change state "
1831 "to RECONNECTING\n", ctrl->cnum);
1832 return;
1833 }
1834
1835 nvme_reset_ctrl(&ctrl->ctrl);
1836}
1837
1838static enum blk_eh_timer_return
1839nvme_fc_timeout(struct request *rq, bool reserved)
1840{
1841 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1842 struct nvme_fc_ctrl *ctrl = op->ctrl;
1843 int ret;
1844
1845 if (reserved)
1846 return BLK_EH_RESET_TIMER;
1847
1848 ret = __nvme_fc_abort_op(ctrl, op);
1849 if (ret)
1850
1851 return BLK_EH_HANDLED;
1852
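	/*
	 * A single FC-NVME io cannot be aborted without impacting the
	 * whole queue and thus the association, so escalate to error
	 * recovery, which resets the controller and rebuilds the
	 * association on the link.
	 */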
1860 nvme_fc_error_recovery(ctrl, "io timeout error");
1861
1862 return BLK_EH_HANDLED;
1863}
1864
1865static int
1866nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1867 struct nvme_fc_fcp_op *op)
1868{
1869 struct nvmefc_fcp_req *freq = &op->fcp_req;
1870 enum dma_data_direction dir;
1871 int ret;
1872
1873 freq->sg_cnt = 0;
1874
1875 if (!blk_rq_payload_bytes(rq))
1876 return 0;
1877
1878 freq->sg_table.sgl = freq->first_sgl;
1879 ret = sg_alloc_table_chained(&freq->sg_table,
1880 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
1881 if (ret)
1882 return -ENOMEM;
1883
1884 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
1885 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
1886 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1887 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
1888 op->nents, dir);
1889 if (unlikely(freq->sg_cnt <= 0)) {
1890 sg_free_table_chained(&freq->sg_table, true);
1891 freq->sg_cnt = 0;
1892 return -EFAULT;
1893 }
1894
1895
1896
1897
1898 return 0;
1899}
1900
1901static void
1902nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1903 struct nvme_fc_fcp_op *op)
1904{
1905 struct nvmefc_fcp_req *freq = &op->fcp_req;
1906
1907 if (!freq->sg_cnt)
1908 return;
1909
1910 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
1911 ((rq_data_dir(rq) == WRITE) ?
1912 DMA_TO_DEVICE : DMA_FROM_DEVICE));
1913
1914 nvme_cleanup_cmd(rq);
1915
1916 sg_free_table_chained(&freq->sg_table, true);
1917
1918 freq->sg_cnt = 0;
1919}
1920
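/*
 * An FC-NVME io maps to a single FC exchange: the SQE is carried in the
 * command IU that opens the exchange, all data transfer for the io is
 * done on that exchange, and the CQE is returned (explicitly in an ERSP
 * IU, or fabricated for short successful responses) when the exchange
 * completes.  nvme_fc_start_fcp_op() formats the command IU and hands
 * the io to the LLDD.
 */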
1944static blk_status_t
1945nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1946 struct nvme_fc_fcp_op *op, u32 data_len,
1947 enum nvmefc_fcp_datadir io_dir)
1948{
1949 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1950 struct nvme_command *sqe = &cmdiu->sqe;
1951 u32 csn;
1952 int ret;
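
	/* don't start new io if the remote port is no longer present */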
1958 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1959 goto busy;
1960
1961 if (!nvme_fc_ctrl_get(ctrl))
1962 return BLK_STS_IOERR;
1963
1964
1965 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
1966 csn = atomic_inc_return(&queue->csn);
1967 cmdiu->csn = cpu_to_be32(csn);
1968 cmdiu->data_len = cpu_to_be32(data_len);
1969 switch (io_dir) {
1970 case NVMEFC_FCP_WRITE:
1971 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
1972 break;
1973 case NVMEFC_FCP_READ:
1974 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
1975 break;
1976 case NVMEFC_FCP_NODATA:
1977 cmdiu->flags = 0;
1978 break;
1979 }
1980 op->fcp_req.payload_length = data_len;
1981 op->fcp_req.io_dir = io_dir;
1982 op->fcp_req.transferred_length = 0;
1983 op->fcp_req.rcv_rsplen = 0;
1984 op->fcp_req.status = NVME_SC_SUCCESS;
1985 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
1986
1987
1988
1989
1990
1991 WARN_ON_ONCE(sqe->common.metadata);
1992 sqe->common.flags |= NVME_CMD_SGL_METABUF;
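
	/*
	 * Build the transport SGL descriptor in the SQE's DPTR per the
	 * FC-NVME rules: a transport data block descriptor with address 0
	 * and a length equal to the total data to be transferred.
	 */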
2001 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2002 NVME_SGL_FMT_TRANSPORT_A;
2003 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2004 sqe->rw.dptr.sgl.addr = 0;
2005
2006 if (!(op->flags & FCOP_FLAGS_AEN)) {
2007 ret = nvme_fc_map_data(ctrl, op->rq, op);
2008 if (ret < 0) {
2009 nvme_cleanup_cmd(op->rq);
2010 nvme_fc_ctrl_put(ctrl);
2011 if (ret == -ENOMEM || ret == -EAGAIN)
2012 return BLK_STS_RESOURCE;
2013 return BLK_STS_IOERR;
2014 }
2015 }
2016
2017 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2018 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2019
2020 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2021
2022 if (!(op->flags & FCOP_FLAGS_AEN))
2023 blk_mq_start_request(op->rq);
2024
2025 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2026 &ctrl->rport->remoteport,
2027 queue->lldd_handle, &op->fcp_req);
2028
2029 if (ret) {
2030 if (!(op->flags & FCOP_FLAGS_AEN))
2031 nvme_fc_unmap_data(ctrl, op->rq, op);
2032
2033 nvme_fc_ctrl_put(ctrl);
2034
2035 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2036 ret != -EBUSY)
2037 return BLK_STS_IOERR;
2038
2039 goto busy;
2040 }
2041
2042 return BLK_STS_OK;
2043
2044busy:
2045 if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
2046 blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
2047
2048 return BLK_STS_RESOURCE;
2049}
2050
2051static blk_status_t
2052nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2053 const struct blk_mq_queue_data *bd)
2054{
2055 struct nvme_ns *ns = hctx->queue->queuedata;
2056 struct nvme_fc_queue *queue = hctx->driver_data;
2057 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2058 struct request *rq = bd->rq;
2059 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2060 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2061 struct nvme_command *sqe = &cmdiu->sqe;
2062 enum nvmefc_fcp_datadir io_dir;
2063 u32 data_len;
2064 blk_status_t ret;
2065
2066 ret = nvme_setup_cmd(ns, rq, sqe);
2067 if (ret)
2068 return ret;
2069
2070 data_len = blk_rq_payload_bytes(rq);
2071 if (data_len)
2072 io_dir = ((rq_data_dir(rq) == WRITE) ?
2073 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2074 else
2075 io_dir = NVMEFC_FCP_NODATA;
2076
2077 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2078}
2079
2080static struct blk_mq_tags *
2081nvme_fc_tagset(struct nvme_fc_queue *queue)
2082{
2083 if (queue->qnum == 0)
2084 return queue->ctrl->admin_tag_set.tags[queue->qnum];
2085
2086 return queue->ctrl->tag_set.tags[queue->qnum - 1];
2087}
2088
2089static int
2090nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
2092{
2093 struct nvme_fc_queue *queue = hctx->driver_data;
2094 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2095 struct request *req;
2096 struct nvme_fc_fcp_op *op;
2097
2098 req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
2099 if (!req)
2100 return 0;
2101
2102 op = blk_mq_rq_to_pdu(req);
2103
2104 if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
2105 (ctrl->lport->ops->poll_queue))
2106 ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
2107 queue->lldd_handle);
2108
2109 return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
2110}
2111
2112static void
2113nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
2114{
2115 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2116 struct nvme_fc_fcp_op *aen_op;
2117 unsigned long flags;
2118 bool terminating = false;
2119 blk_status_t ret;
2120
2121	if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
2122 return;
2123
2124 spin_lock_irqsave(&ctrl->lock, flags);
2125 if (ctrl->flags & FCCTRL_TERMIO)
2126 terminating = true;
2127 spin_unlock_irqrestore(&ctrl->lock, flags);
2128
2129 if (terminating)
2130 return;
2131
2132 aen_op = &ctrl->aen_ops[aer_idx];
2133
2134 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2135 NVMEFC_FCP_NODATA);
2136 if (ret)
2137 dev_err(ctrl->ctrl.device,
2138 "failed async event work [%d]\n", aer_idx);
2139}
2140
2141static void
2142__nvme_fc_final_op_cleanup(struct request *rq)
2143{
2144 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2145 struct nvme_fc_ctrl *ctrl = op->ctrl;
2146
2147 atomic_set(&op->state, FCPOP_STATE_IDLE);
2148 op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
2149 FCOP_FLAGS_COMPLETE);
2150
2151 nvme_fc_unmap_data(ctrl, rq, op);
2152 nvme_complete_rq(rq);
2153 nvme_fc_ctrl_put(ctrl);
2154
2155}
2156
2157static void
2158nvme_fc_complete_rq(struct request *rq)
2159{
2160 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2161 struct nvme_fc_ctrl *ctrl = op->ctrl;
2162 unsigned long flags;
2163 bool completed = false;
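
	/*
	 * Completion may race with the transport's done callback.  The
	 * RELEASED/COMPLETE flags decide which path performs the final
	 * cleanup: if the transport completion already ran, finish the op
	 * here; otherwise mark it released and let the transport
	 * completion do it.
	 */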
2173 spin_lock_irqsave(&ctrl->lock, flags);
2174 if (op->flags & FCOP_FLAGS_COMPLETE)
2175 completed = true;
2176 else
2177 op->flags |= FCOP_FLAGS_RELEASED;
2178 spin_unlock_irqrestore(&ctrl->lock, flags);
2179
2180 if (completed)
2181 __nvme_fc_final_op_cleanup(rq);
2182}
2183
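/*
 * Called via blk_mq_tagset_busy_iter() when an association is being torn
 * down: abort the FC exchange of each started request so the LLDD
 * releases it.  The LLDD then runs the normal io done path for the
 * request, but with an aborted status.
 */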
2197static void
2198nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2199{
2200 struct nvme_ctrl *nctrl = data;
2201 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2202 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2203 unsigned long flags;
2204 int status;
2205
2206 if (!blk_mq_request_started(req))
2207 return;
2208
2209 spin_lock_irqsave(&ctrl->lock, flags);
2210 if (ctrl->flags & FCCTRL_TERMIO) {
2211 ctrl->iocnt++;
2212 op->flags |= FCOP_FLAGS_TERMIO;
2213 }
2214 spin_unlock_irqrestore(&ctrl->lock, flags);
2215
2216 status = __nvme_fc_abort_op(ctrl, op);
2217 if (status) {
2218
2219
2220
2221
2222
2223
2224
2225 spin_lock_irqsave(&ctrl->lock, flags);
2226 if (ctrl->flags & FCCTRL_TERMIO)
2227 ctrl->iocnt--;
2228 op->flags &= ~FCOP_FLAGS_TERMIO;
2229 spin_unlock_irqrestore(&ctrl->lock, flags);
2230 return;
2231 }
2232}
2233
2234
2235static const struct blk_mq_ops nvme_fc_mq_ops = {
2236 .queue_rq = nvme_fc_queue_rq,
2237 .complete = nvme_fc_complete_rq,
2238 .init_request = nvme_fc_init_request,
2239 .exit_request = nvme_fc_exit_request,
2240 .init_hctx = nvme_fc_init_hctx,
2241 .poll = nvme_fc_poll,
2242 .timeout = nvme_fc_timeout,
2243};
2244
2245static int
2246nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2247{
2248 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2249 unsigned int nr_io_queues;
2250 int ret;
2251
2252 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2253 ctrl->lport->ops->max_hw_queues);
2254 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2255 if (ret) {
2256 dev_info(ctrl->ctrl.device,
2257 "set_queue_count failed: %d\n", ret);
2258 return ret;
2259 }
2260
2261 ctrl->ctrl.queue_count = nr_io_queues + 1;
2262 if (!nr_io_queues)
2263 return 0;
2264
2265 nvme_fc_init_io_queues(ctrl);
2266
2267 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2268 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2269 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2270 ctrl->tag_set.reserved_tags = 1;
2271 ctrl->tag_set.numa_node = NUMA_NO_NODE;
2272 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2273 ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2274 (SG_CHUNK_SIZE *
2275 sizeof(struct scatterlist)) +
2276 ctrl->lport->ops->fcprqst_priv_sz;
2277 ctrl->tag_set.driver_data = ctrl;
2278 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2279 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2280
2281 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2282 if (ret)
2283 return ret;
2284
2285 ctrl->ctrl.tagset = &ctrl->tag_set;
2286
2287 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2288 if (IS_ERR(ctrl->ctrl.connect_q)) {
2289 ret = PTR_ERR(ctrl->ctrl.connect_q);
2290 goto out_free_tag_set;
2291 }
2292
2293 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2294 if (ret)
2295 goto out_cleanup_blk_queue;
2296
2297 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2298 if (ret)
2299 goto out_delete_hw_queues;
2300
2301 return 0;
2302
2303out_delete_hw_queues:
2304 nvme_fc_delete_hw_io_queues(ctrl);
2305out_cleanup_blk_queue:
2306 blk_cleanup_queue(ctrl->ctrl.connect_q);
2307out_free_tag_set:
2308 blk_mq_free_tag_set(&ctrl->tag_set);
2309 nvme_fc_free_io_queues(ctrl);
2310
2311
2312 ctrl->ctrl.tagset = NULL;
2313
2314 return ret;
2315}
2316
static int
nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;

	if (ctrl->ctrl.queue_count == 1)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	ret = blk_mq_reinit_tagset(&ctrl->tag_set, nvme_fc_reinit_request);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_delete_hw_queues;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}

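/*
 * Create a controller association on the link side and bring the
 * controller up on the host side: create and connect the admin queue,
 * read CAP and enable the controller, run identify, set up the AEN
 * operations and (re)create the io queues, then mark the controller
 * LIVE and start it.
 */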
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	u32 segs;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	/* create the admin queue and connect it on the link side */
	nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);

	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_FC_AQ_BLKMQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_FC_AQ_BLKMQ_DEPTH,
				(NVME_FC_AQ_BLKMQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	if (ctrl->ctrl.state != NVME_CTRL_NEW)
		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* check controller capabilities */
	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_disconnect_admin_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (ret)
		goto out_disconnect_admin_queue;

	segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
			ctrl->lport->ops->max_sgl_segments);
	ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);

	ret = nvme_init_identify(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* sanity checks */

	/* FC-NVME does not support a nonzero in-capsule data offset */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		/* ret is still 0 here; fail the association explicitly */
		ret = -EINVAL;
		goto out_disconnect_admin_queue;
	}

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn and clamp queue_size to the controller's maxcmd */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to maxcmd\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * create the io queues: a fresh create on a new controller,
	 * a reinit on a reset/reconnect
	 */
	if (ctrl->ctrl.queue_count > 1) {
		if (ctrl->ctrl.state == NVME_CTRL_NEW)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_reinit_io_queues(ctrl);
		if (ret)
			goto out_term_aen_ops;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	ctrl->ctrl.nr_reconnects = 0;

	nvme_start_ctrl(&ctrl->ctrl);

	return 0;

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to the fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);

	return ret;
}

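/*
 * Tear down the association: terminate all outstanding io on the admin
 * and io queues (aborting the underlying FC exchanges via the LLDD),
 * wait for the aborts to complete, send a Disconnect(association) LS if
 * an association exists, and free the hardware queues.
 */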
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->flags |= FCCTRL_TERMIO;
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * If io queues are present, stop them and terminate all
	 * outstanding ios on them. The busy_iter callback aborts the
	 * FC exchange for each outstanding request through the LLDD.
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	/*
	 * Clean up the admin queue the same way: quiesce it and
	 * terminate any outstanding admin requests.
	 */
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	ctrl->flags &= ~FCCTRL_TERMIO;
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to the fc-nvme target.
	 * If the association was never established, association_id is 0
	 * and there is nothing to disconnect.
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);
}

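/*
 * Work item that deletes the controller: cancel pending reset/connect
 * work, stop the core controller and remove its namespaces, tear down
 * the association, then uninit and drop the transport's reference on
 * the nvme ctrl.
 */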
static void
nvme_fc_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, delete_work);

	cancel_work_sync(&ctrl->ctrl.reset_work);
	cancel_delayed_work_sync(&ctrl->connect_work);
	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_remove_namespaces(&ctrl->ctrl);

	/*
	 * kill the association on the link side. This will block
	 * waiting for io to terminate.
	 */
	nvme_fc_delete_association(ctrl);

	/*
	 * tear down the controller: once the last reference on the
	 * nvme ctrl is dropped, the transport's free_ctrl callback
	 * (nvme_fc_nvme_ctrl_freed) releases the remaining resources.
	 */
	nvme_uninit_ctrl(&ctrl->ctrl);

	nvme_put_ctrl(&ctrl->ctrl);
}

static bool
__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return true;

	if (!queue_work(nvme_wq, &ctrl->delete_work))
		return true;

	return false;
}

static int
__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
{
	return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0;
}

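/*
 * Request from the nvme core layer to delete the controller.
 */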
static int
nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	int ret;

	if (!kref_get_unless_zero(&ctrl->ctrl.kref))
		return -EBUSY;

	ret = __nvme_fc_del_ctrl(ctrl);

	if (!ret)
		flush_workqueue(nvme_wq);

	nvme_put_ctrl(&ctrl->ctrl);

	return ret;
}

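/*
 * Called when an association create/reconnect attempt fails: either
 * schedule another reconnect attempt after the configured delay, or
 * give up and schedule controller deletion once the maximum number of
 * reconnect attempts has been reached.
 */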
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	/* if we are resetting or deleting, do nothing */
	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
			     ctrl->ctrl.state == NVME_CTRL_LIVE);
		return;
	}

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
		ctrl->cnum, status);

	if (nvmf_should_reconnect(&ctrl->ctrl)) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
			ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &ctrl->connect_work,
				ctrl->ctrl.opts->reconnect_delay * HZ);
	} else {
		dev_warn(ctrl->ctrl.device,
			"NVME-FC{%d}: Max reconnect attempts (%d) "
			"reached. Removing controller\n",
			ctrl->cnum, ctrl->ctrl.nr_reconnects);
		WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
	}
}

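/*
 * Work item for controller reset: stop the controller, tear down the
 * existing association and create a new one. On failure, fall back to
 * the reconnect/delete policy above.
 */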
static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);

	/* will block, waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
}

static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_del_nvme_ctrl,
	.get_address		= nvmf_get_address,
};

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reconnect complete\n",
			ctrl->cnum);
}

static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};

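/*
 * Allocate and initialize a new FC controller: set up the admin tag set
 * and admin queue, register with the nvme core, link the controller
 * onto the remoteport's list, and attempt to create the initial
 * association.
 */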
static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx, retry;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count, plus one for the admin queue */
	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_admin_tag_set;
	}

	/*
	 * The io tag set cannot be sized yet: the io queue count is only
	 * known after talking to the controller, so it is allocated later
	 * in the connect path (nvme_fc_create_io_queues).
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;

	/* from here on, teardown is by ref counting on the nvme ctrl */
	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	/*
	 * association creation can fail transiently; make a few
	 * attempts before giving up
	 */
	for (retry = 0; retry < 3; retry++) {
		ret = nvme_fc_create_association(ctrl);
		if (!ret)
			break;
	}

	if (ret) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);

		ctrl->ctrl.opts = NULL;

		/* initiate nvme ctrl ref counting teardown */
		nvme_uninit_ctrl(&ctrl->ctrl);
		nvme_put_ctrl(&ctrl->ctrl);

		/* Remove core ctrl ref. */
		nvme_put_ctrl(&ctrl->ctrl);

		/*
		 * As we're past the transition to ref-counting teardown,
		 * returning an error pointer makes the caller do an rport
		 * put as if the transition had not happened. The teardown
		 * path also puts the rport, so take an extra reference
		 * here to keep the counts balanced.
		 */
		nvme_fc_rport_get(rport);

		if (ret > 0)
			ret = -EIO;
		return ERR_PTR(ret);
	}

	kref_get(&ctrl->ctrl.kref);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exits via here do not follow the ctrl ref counting teardown path */
	return ERR_PTR(ret);
}

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

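/*
 * Parse a transport address of the form
 *   "nn-0x<16 hex digits>:pn-0x<16 hex digits>"	(full length)
 * or
 *   "nn-<16 hex digits>:pn-<16 hex digits>"		(minimal length)
 * into the node_name/port_name pair.
 */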
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate that the string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	/* convert each wwn to "0x<hex>" form and parse it as a u64 */
	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}

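/*
 * fabrics create_ctrl entry point: parse the local and remote transport
 * addresses, find the matching registered localport/remoteport pair,
 * and create a controller on that pairing. This is typically reached
 * via a fabrics connect request, e.g. from nvme-cli with
 * "--transport=fc --traddr=... --host-traddr=..." options.
 */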
static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn)
				continue;

			/* if we fail to get a reference, fall through; will error */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return ERR_PTR(-ENOENT);
}

static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};

static int __init nvme_fc_init_module(void)
{
	return nvmf_register_transport(&nvme_fc_transport);
}

static void __exit nvme_fc_exit_module(void)
{
	/* sanity check: all localports should have been removed by now */
	if (!list_empty(&nvme_fc_lport_list))
		pr_warn("%s: localport list not empty\n", __func__);

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");