// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				lsdir;
	int				status;
	struct list_head		ls_list;
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};

static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
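
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */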
		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
	}

	return 0;
}

static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
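
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */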
		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}

static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_tport *tport = targetport->private;
	int ret = 0;
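
	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */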
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		schedule_work(&tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));
	lsrsp->done(lsrsp);

	if (targetport) {
		tport = targetport->private;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		schedule_work(&tport->ls_work);
	}

	return 0;
}

static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

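/*
 * Simulate reception of RSCN and converting it to a initiator transport
 * call to rescan a remote port.
 */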
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	schedule_work(&tgt_rscn->work);
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

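	/* release original io reference on tgt struct */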
	fcloop_tfcp_req_put(tfcp_req);
}

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
					   &tfcp_req->tgt_fcp_req,
					   fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
}
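/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */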
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}

static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}

static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

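	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * (don't error out), and let the abort path finish the io.
	 */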
	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

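	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */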
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

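	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */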
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

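	/* break initiator/target relationship for io */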
	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

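	/* release any threads waiting for the unreg to complete */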
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	flush_work(&tport->ls_work);
	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES		4
#define	FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_h2t_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_h2t_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
};

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
	spin_lock_init(&tport->lock);
	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
	INIT_LIST_HEAD(&tport->ls_list);

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static const struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");