#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
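
/*
 * The admin queue is sized at NVMF_AQ_DEPTH, but one slot is set aside
 * for an Async Event notification command.  The block layer therefore
 * sees an admin queue depth of NVME_FC_AQ_BLKMQ_DEPTH, and AEN commands
 * are given command ids starting at AEN_CMDID_BASE, above the blk-mq
 * tag space.
 */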
#define NVME_FC_NR_AEN_COMMANDS	1
#define NVME_FC_AQ_BLKMQ_DEPTH	\
	(NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
#define AEN_CMDID_BASE		(NVME_FC_AQ_BLKMQ_DEPTH + 1)

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = (1 << 0),
};

#define NVMEFC_QUEUE_DELAY	3

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	int			queue_size;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;
	bool			req_queued;
};

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;	/*
					 * nvme/host/core.c expects the
					 * nvme_request to be the first
					 * element of the per-request
					 * private data, so keep it first.
					 */

	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;
	struct list_head		endp_list;
	struct device			*dev;
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
} __aligned(sizeof(u64));

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;
	struct list_head		ctrl_list;
	spinlock_t			lock;
	struct kref			ref;
} __aligned(sizeof(u64));

enum nvme_fcctrl_state {
	FCCTRL_INIT		= 0,
	FCCTRL_ACTIVE		= 1,
};

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	u32			queue_count;

	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	u64			association_id;

	u64			cap;

	struct list_head	ctrl_list;
	struct list_head	ls_req_list;

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	delete_work;
	struct kref		ref;
	int			state;

	struct nvme_fc_fcp_op	aen_ops[NVME_FC_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}

static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);
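
/*
 * nvme_fc_register_localport - transport entry point called by an LLDD
 *	to register the existence of a host (initiator) FC port.
 *	Validates the LLDD template, allocates and initializes the lport,
 *	assigns it a port number, and links it on the global lport list.
 *	On success, a pointer to the new local port is returned in @portptr.
 */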
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	/* let the LLDD know we've finished tearing it down */
	lport->ops->localport_delete(&lport->localport);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}
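
/*
 * nvme_fc_unregister_localport - transport entry point called by an
 *	LLDD to deregister a previously registered local FC port.
 *	The port is marked deleted and the registration reference is
 *	dropped; the lport is freed once the last reference goes away.
 */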
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
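
/*
 * nvme_fc_register_remoteport - transport entry point called by an LLDD
 *	to register the existence of a remote (target) FC port detected
 *	on the local port's fabric.  Allocates the rport, takes a
 *	reference on the lport, and links the rport on the lport's
 *	endpoint list.  On success, the new remote port is returned in
 *	@portptr.
 */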
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_kfree_rport;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_lport_put;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	kref_init(&newrec->ref);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	*portptr = &newrec->remoteport;
	return 0;

out_lport_put:
	nvme_fc_lport_put(lport);
out_kfree_rport:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	/* let the LLDD know we've finished tearing it down */
	lport->ops->remoteport_delete(&rport->remoteport);

	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}
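
/*
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *	LLDD to deregister a previously registered remote FC port.
 *	The port is marked deleted, any controllers attached to it are
 *	scheduled for deletion, and the registration reference is dropped.
 */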
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	/* tear down all associations to the remote port */
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
		__nvme_fc_del_ctrl(ctrl);

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_rport_put(rport);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
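
/*
 * fc_dma_*() wrappers: an LLDD may register with a NULL dev (no
 * DMA-capable device), in which case these helpers skip the DMA API
 * and treat the buffers as directly addressable.
 */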
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void
__nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,
		struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&ctrl->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&ctrl->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&ctrl->lock, flags);

	fc_dma_unmap_single(ctrl->dev, lsreq->rqstdma,
				(lsreq->rqstlen + lsreq->rsplen),
				DMA_BIDIRECTIONAL);

	nvme_fc_ctrl_put(ctrl);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret;

	if (!nvme_fc_ctrl_get(ctrl))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->ctrl = ctrl;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(ctrl->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(ctrl->dev, lsreq->rqstdma)) {
		nvme_fc_ctrl_put(ctrl);
		dev_err(ctrl->dev,
			"els request command failed EFAULT.\n");
		return -EFAULT;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&ctrl->lock, flags);

	list_add_tail(&lsop->lsreq_list, &ctrl->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&ctrl->lock, flags);

	ret = ctrl->lport->ops->ls_req(&ctrl->lport->localport,
					&ctrl->rport->remoteport, lsreq);
	if (ret)
		lsop->ls_error = ret;

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(ctrl, lsop, nvme_fc_send_ls_req_done);

	if (!ret)
		/*
		 * No timeout/not interruptible: the request structure
		 * must stay around until the LLDD calls us back, so
		 * wait unconditionally; the LLDD is responsible for
		 * any timeout action.
		 */
		wait_for_completion(&lsop->ls_done);

	__nvme_fc_finish_ls_req(ctrl, lsop);

	if (ret) {
		dev_err(ctrl->dev,
			"ls request command failed (%d).\n", ret);
		return ret;
	}

	/* ACC or RJT payload? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static void
nvme_fc_send_ls_req_async(struct nvme_fc_ctrl *ctrl,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	int ret;

	ret = __nvme_fc_send_ls_req(ctrl, lsop, done);

	/* don't wait for completion */

	if (ret)
		done(&lsop->ls_req, ret);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
		min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	if (assoc_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
	struct nvme_fc_ctrl *ctrl = lsop->ctrl;

	__nvme_fc_finish_ls_req(ctrl, lsop);

	if (status)
		dev_err(ctrl->dev,
			"disconnect assoc ls request command failed (%d).\n",
			status);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}
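
/*
 * nvme_fc_xmt_disconnect_assoc - sends a Disconnect Association LS to
 *	the remote port to terminate the association, using the async LS
 *	path as the response is not acted upon (the initiator doesn't
 *	care whether the target accepts or rejects the disconnect).  If
 *	the LS can't be allocated or sent, the association is simply
 *	allowed to fall apart.
 */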
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... just let association fall apart */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
				FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	nvme_fc_send_ls_req_async(ctrl, lsop, nvme_fc_disconnect_assoc_done);

	/* the lsop will be freed in the done routine */
	ctrl->association_id = 0;
}

static int
nvme_fc_reinit_request(void *data, struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;

	memset(cmdiu, 0, sizeof(*cmdiu));
	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));

	return 0;
}

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int rq_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	__nvme_fc_exit_request(data, op);
}

static void
nvme_fc_exit_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
		if (atomic_read(&aen_op->state) == FCPOP_STATE_UNINIT)
			continue;
		__nvme_fc_exit_request(ctrl, aen_op);
		nvme_fc_ctrl_put(ctrl);
	}
}

void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	u16 status;

	/*
	 * The linux implementation requires a CQE for every response.
	 * FC-NVME allows the target to send a full ERSP IU only
	 * intermittently (per the ersp_ratio); when it does not, the
	 * RSP frame alone indicates success, and the transport must
	 * synthesize a successful CQE (zero status, zero result).
	 * When a full ERSP is received, its contents are validated
	 * against the command that was sent before the CQE it carries
	 * is trusted.
	 */

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
		status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
	else
		status = freq->status;

	/*
	 * If the wire-level status is unsuccessful, the blk-mq layer
	 * can be completed with the non-zero status and the content of
	 * the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * The command completed successfully relative to the wire
	 * protocol.  Validate the response payload.
	 */
	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload, or 12 bytes of payload (which
		 * should all be zeros) - considered successful, with
		 * no result payload in the synthesized CQE.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = -EIO;
			goto done;
		}
		op->nreq.result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate the ERSP IU, then look at the cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rqno != le16_to_cpu(cqe->command_id))) {
			status = -EIO;
			goto done;
		}
		op->nreq.result = cqe->result;
		status = le16_to_cpu(cqe->status) >> 1;
		break;

	default:
		status = -EIO;
		goto done;
	}

done:
	if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status,
					&op->nreq.result);
		nvme_fc_ctrl_put(ctrl);
		return;
	}

	blk_mq_complete_request(rq, status);
}

static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}

static int
nvme_fc_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int rq_idx,
		unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx + 1];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

static int
nvme_fc_init_admin_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int rq_idx,
		unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_queue *queue = &ctrl->queues[0];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(AEN_CMDID_BASE + i));
		if (ret)
			return ret;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		sqe->common.command_id = AEN_CMDID_BASE + i;
	}
	return 0;
}

static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 1);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	queue->queue_size = queue_size;
}
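
/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 */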
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	/*
	 * The current implementation never disconnects a single queue.
	 * It always terminates a whole association, so there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_destroy_admin_queue(struct nvme_fc_ctrl *ctrl)
{
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
	nvme_fc_free_queue(&ctrl->queues[0]);
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
	int i;

	for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	/* only delete the hw io queues that were successfully created */
	for (i--; i >= 1; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}

static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
}

static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->state != FCCTRL_INIT) {
		/* remove from rport list */
		spin_lock_irqsave(&ctrl->rport->lock, flags);
		list_del(&ctrl->ctrl_list);
		spin_unlock_irqrestore(&ctrl->rport->lock, flags);
	}

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	kfree(ctrl->queues);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}
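
/*
 * All accesses from the nvme core layer are done - the controller can
 * now be freed.  Called after the last nvme_put_ctrl() reference drops.
 */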
static void
nvme_fc_free_nvme_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	/*
	 * Tear down the association: send the disconnect LS and free
	 * the io and admin queue resources.
	 */
	if (ctrl->state != FCCTRL_INIT) {
		/* send a Disconnect(association) LS to the fc-nvme target */
		nvme_fc_xmt_disconnect_assoc(ctrl);

		if (ctrl->ctrl.tagset) {
			blk_cleanup_queue(ctrl->ctrl.connect_q);
			blk_mq_free_tag_set(&ctrl->tag_set);
			nvme_fc_delete_hw_io_queues(ctrl);
			nvme_fc_free_io_queues(ctrl);
		}

		nvme_fc_exit_aen_ops(ctrl);

		nvme_fc_destroy_admin_queue(ctrl);
	}

	nvme_fc_ctrl_put(ctrl);
}

static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	int state;

	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (state != FCPOP_STATE_ACTIVE) {
		atomic_set(&op->state, state);
		return -ECANCELED;
	}

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	int ret;

	if (reserved)
		return BLK_EH_RESET_TIMER;

	ret = __nvme_fc_abort_op(ctrl, op);
	if (ret)
		/* io wasn't active to abort; consider it done */
		return BLK_EH_HANDLED;

	/*
	 * TODO: force a controller reset
	 *   when that happens, queues will be torn down and outstanding
	 *   ios will be terminated, and the above abort, on a single io,
	 *   will no longer be needed.
	 */

	return BLK_EH_HANDLED;
}

static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_payload_bytes(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, true);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq) for DIF
	 */
	return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
				((rq_data_dir(rq) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, true);

	freq->sg_cnt = 0;
}
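
/*
 * Routine to submit an FCP command/io to the LLDD: builds the FC-NVME
 * CMD IU (connection id, CSN, data length, direction flags), forces the
 * SQE to use an SGL descriptor - FC-NVME carries data placement in the
 * FC exchange rather than in the SQE - maps the data buffer, and hands
 * the request to the LLDD's fcp_io().  Returns a BLK_MQ_RQ_QUEUE_xxx
 * status; -EBUSY from the LLDD translates to a requeue after a short
 * delay.
 */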
static int
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	u32 csn;
	int ret;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_MQ_RQ_QUEUE_ERROR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	csn = atomic_inc_return(&queue->csn);
	cmdiu->csn = cpu_to_be32(csn);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = 0;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * Set fields mandated by the fabrics spec as well as those by
	 * the FC-NVME spec: no PRPs/metadata pointers, SGL metabuf flag.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	WARN_ON_ONCE(sqe->common.dptr.prp1);
	WARN_ON_ONCE(sqe->common.dptr.prp2);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * The data length goes in the SQE as an SGL descriptor; actual
	 * data placement is handled by the FC exchange, so only the
	 * length and offset format matter here.
	 */
	sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	WARN_ON_ONCE(sqe->common.command_id != cpu_to_le16(op->rqno));

	if (op->rq) {			/* skipped on aens */
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			dev_err(queue->ctrl->ctrl.device,
				"Failed to map data (%d)\n", ret);
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			return (ret == -ENOMEM || ret == -EAGAIN) ?
				BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (op->rq)
		blk_mq_start_request(op->rq);

	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		dev_err(ctrl->dev,
			"Send nvme command failed - lldd returned %d.\n", ret);

		if (op->rq) {		/* normal request */
			nvme_fc_unmap_data(ctrl, op->rq, op);
			nvme_cleanup_cmd(op->rq);
		}

		nvme_fc_ctrl_put(ctrl);

		if (ret != -EBUSY)
			return BLK_MQ_RQ_QUEUE_ERROR;

		if (op->rq) {
			blk_mq_stop_hw_queues(op->rq->q);
			blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
		}
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	return BLK_MQ_RQ_QUEUE_OK;
}

static int
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir io_dir;
	u32 data_len;
	int ret;

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	data_len = blk_rq_payload_bytes(rq);
	if (data_len)
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	else
		io_dir = NVMEFC_FCP_NODATA;

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}

static struct blk_mq_tags *
nvme_fc_tagset(struct nvme_fc_queue *queue)
{
	if (queue->qnum == 0)
		return queue->ctrl->admin_tag_set.tags[queue->qnum];

	return queue->ctrl->tag_set.tags[queue->qnum - 1];
}

static int
nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *req;
	struct nvme_fc_fcp_op *op;

	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
	if (!req) {
		dev_err(queue->ctrl->ctrl.device,
			"tag 0x%x on QNum %#x not found\n",
			tag, queue->qnum);
		return 0;
	}

	op = blk_mq_rq_to_pdu(req);

	if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
	    (ctrl->lport->ops->poll_queue))
		ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
						queue->lldd_handle);

	return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
}

static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	int ret;

	if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
		return;

	aen_op = &ctrl->aen_ops[aer_idx];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work [%d]\n", aer_idx);
}

static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	int error = 0, state;

	state = atomic_xchg(&op->state, FCPOP_STATE_IDLE);

	nvme_cleanup_cmd(rq);

	nvme_fc_unmap_data(ctrl, rq, op);

	if (unlikely(rq->errors)) {
		if (nvme_req_needs_retry(rq, rq->errors)) {
			nvme_requeue_req(rq);
			return;
		}

		if (blk_rq_is_passthrough(rq))
			error = rq->errors;
		else
			error = nvme_error_status(rq->errors);
	}

	nvme_fc_ctrl_put(ctrl);

	blk_mq_end_request(rq, error);
}

static struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.reinit_request	= nvme_fc_reinit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.poll		= nvme_fc_poll,
	.timeout	= nvme_fc_timeout,
};

static struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_admin_request,
	.exit_request	= nvme_fc_exit_request,
	.reinit_request	= nvme_fc_reinit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};

static int
nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
{
	u32 segs;
	int error;

	nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);

	error = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_FC_AQ_BLKMQ_DEPTH,
				(NVME_FC_AQ_BLKMQ_DEPTH / 4));
	if (error)
		return error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2;
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_queue;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_FC_AQ_BLKMQ_DEPTH);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_delete_hw_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_delete_hw_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_delete_hw_queue;

	segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
			ctrl->lport->ops->max_sgl_segments);
	ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_delete_hw_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	return error;
}
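
/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated.  The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then
 * invoke this routine to kill them on a 1 by 1 basis.
 *
 * As FC allocates an FC exchange for each io, the transport must
 * contact the LLDD to terminate the exchange, thus releasing it.
 */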
static void
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
	int status;

	if (!blk_mq_request_started(req))
		return;

	/* this performs an abort on the FC exchange for the io */
	status = __nvme_fc_abort_op(ctrl, op);

	/*
	 * if __nvme_fc_abort_op failed, the io wasn't active to abort;
	 * nothing more to do on it.
	 */
	if (status)
		return;
}
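
/*
 * nvme_fc_shutdown_ctrl - stops the io and admin queues, terminates
 *	any outstanding exchanges on them via the LLDD, and, if the
 *	controller is still live, performs the normal NVMe shutdown
 *	handshake.
 */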
static void
nvme_fc_shutdown_ctrl(struct nvme_fc_ctrl *ctrl)
{
	/*
	 * If io queues are present, stop them and terminate all
	 * outstanding ios on them.  As FC allocates an FC exchange for
	 * each io, the LLDD must be contacted to terminate the exchange,
	 * thus releasing it.  blk_mq_tagset_busy_iter() is used to find
	 * the outstanding ios.
	 */
	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	/*
	 * clean up the admin queue the same way: stop it and terminate
	 * any outstanding exchanges on it.
	 */
	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
			nvme_fc_terminate_exchange, &ctrl->ctrl);
}

static void
__nvme_fc_remove_ctrl(struct nvme_fc_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	/* stop and terminate ios on the admin and io queues */
	nvme_fc_shutdown_ctrl(ctrl);

	/*
	 * tear down the controller.  This drops the last core-layer
	 * reference on the nvme ctrl, which in turn invokes the
	 * transport's nvme_fc_free_nvme_ctrl() callback to tear down
	 * the logical queues and the association.
	 */
	nvme_uninit_ctrl(&ctrl->ctrl);

	nvme_put_ctrl(&ctrl->ctrl);
}

static void
nvme_fc_del_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, delete_work);

	__nvme_fc_remove_ctrl(ctrl);
}

static int
__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
		return -EBUSY;

	return 0;
}
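
/*
 * Request from the nvme core layer to delete the controller.
 */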
static int
nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_rport *rport = ctrl->rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rport->lock, flags);
	ret = __nvme_fc_del_ctrl(ctrl);
	spin_unlock_irqrestore(&rport->lock, flags);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

static int
nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
{
	return -EIO;
}

static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_fc_reset_nvme_ctrl,
	.free_ctrl		= nvme_fc_free_nvme_ctrl,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_del_nvme_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
	.get_address		= nvmf_get_address,
};

static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;

	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->queue_count = opts->nr_io_queues + 1;
	if (!opts->nr_io_queues)
		return 0;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
			opts->nr_io_queues);

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_delete_hw_queues;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	nvme_stop_keep_alive(&ctrl->ctrl);
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	/* clear tagset so the free routine won't tear down io queues again */
	ctrl->ctrl.tagset = NULL;

	return ret;
}

static struct nvme_ctrl *
__nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx;
	bool changed;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	INIT_LIST_HEAD(&ctrl->ls_req_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->state = FCCTRL_INIT;
	ctrl->cnum = idx;

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_free_ida;

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->delete_work, nvme_fc_del_ctrl_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	opts->nr_io_queues = ctrl->queue_count;	/* so opts has valid value */
	ctrl->queue_count++;	/* +1 for the admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
				GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_fc_configure_admin_queue(ctrl);
	if (ret)
		goto out_uninit_ctrl;

	/* sanity checks */

	/* FC-NVME does not support in-capsule data */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		ret = -EINVAL;
		goto out_remove_admin_queue;
	}

	/* warn and clamp if maxcmd is lower than queue_size */
	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to maxcmd\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_exit_aen_ops;

	if (ctrl->queue_count > 1) {
		ret = nvme_fc_create_io_queues(ctrl);
		if (ret)
			goto out_exit_aen_ops;
	}

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->state = FCCTRL_ACTIVE;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_exit_aen_ops:
	nvme_fc_exit_aen_ops(ctrl);
out_remove_admin_queue:
	/* send a Disconnect(association) LS to the fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
	nvme_stop_keep_alive(&ctrl->ctrl);
	nvme_fc_destroy_admin_queue(ctrl);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);

out_free_ida:
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	nvme_fc_rport_put(rport);
	return ERR_PTR(ret);
}

enum {
	FCT_TRADDR_ERR		= 0,
	FCT_TRADDR_WWNN		= 1 << 0,
	FCT_TRADDR_WWPN		= 1 << 1,
};

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static const match_table_t traddr_opt_tokens = {
	{ FCT_TRADDR_WWNN,	"nn-%s" },
	{ FCT_TRADDR_WWPN,	"pn-%s" },
	{ FCT_TRADDR_ERR,	NULL }
};

static int
nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ":\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, traddr_opt_tokens, args);
		switch (token) {
		case FCT_TRADDR_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out;
			}
			traddr->nn = token64;
			break;
		case FCT_TRADDR_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out;
			}
			traddr->pn = token64;
			break;
		default:
			pr_warn("unknown traddr token or missing value '%s'\n",
					p);
			ret = -EINVAL;
			goto out;
		}
	}

out:
	kfree(options);
	return ret;
}

static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_address(&raddr, opts->traddr);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn)
				continue;

			/* if we fail to get a reference, fall through;
			 * the lookup will error out below.
			 */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			return __nvme_fc_create_ctrl(dev, opts, lport, rport);
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return ERR_PTR(-ENOENT);
}

static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY,
	.create_ctrl	= nvme_fc_create_ctrl,
};

static int __init nvme_fc_init_module(void)
{
	nvme_fc_wq = create_workqueue("nvme_fc_wq");
	if (!nvme_fc_wq)
		return -ENOMEM;

	return nvmf_register_transport(&nvme_fc_transport);
}

static void __exit nvme_fc_exit_module(void)
{
	/* sanity check - all lports should have been removed */
	if (!list_empty(&nvme_fc_lport_list))
		pr_warn("%s: localport list not empty\n", __func__);

	nvmf_unregister_transport(&nvme_fc_transport);

	destroy_workqueue(nvme_fc_wq);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");