// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

enum {
	NVMF_OPT_ERR = 0,
	NVMF_OPT_WWNN = 1 << 0,
	NVMF_OPT_WWPN = 1 << 1,
	NVMF_OPT_ROLES = 1 << 2,
	NVMF_OPT_FCADDR = 1 << 3,
	NVMF_OPT_LPWWNN = 1 << 4,
	NVMF_OPT_LPWWPN = 1 << 5,
};

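/*
 * Options parsed from strings written to the fcloop ctl device;
 * ->mask records which NVMF_OPT_* values were actually supplied.
 */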
struct fcloop_ctrl_options {
	int mask;
	u64 wwnn;
	u64 wwpn;
	u32 roles;
	u32 fcaddr;
	u64 lpwwnn;
	u64 lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN, "wwnn=%s" },
	{ NVMF_OPT_WWPN, "wwpn=%s" },
	{ NVMF_OPT_ROLES, "roles=%d" },
	{ NVMF_OPT_FCADDR, "fcaddr=%x" },
	{ NVMF_OPT_LPWWNN, "lpwwnn=%s" },
	{ NVMF_OPT_LPWWPN, "lpwwpn=%s" },
	{ NVMF_OPT_ERR, NULL }
};

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}

static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}

#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

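/*
 * fcloop topology: an lport is a registered NVMe-FC local port; an
 * nport pairs the host's view (rport) and the target's view (tport)
 * of the same emulated port, letting the two transports talk to each
 * other entirely in software.
 */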
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
	spinlock_t lock;
	struct list_head ls_list;
	struct work_struct ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
	spinlock_t lock;
	struct list_head ls_list;
	struct work_struct ls_work;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct nvmefc_ls_req *lsreq;
	struct nvmefc_ls_rsp ls_rsp;
	int lsdir;
	int status;
	struct list_head ls_list;
};

struct fcloop_rscn {
	struct fcloop_tport *tport;
	struct work_struct work;
};

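/* initiator-side I/O states, tracked in fcloop_fcpreq->inistate */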
enum {
	INI_IO_START = 0,
	INI_IO_ACTIVE = 1,
	INI_IO_ABORTED = 2,
	INI_IO_COMPLETED = 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport *tport;
	struct nvmefc_fcp_req *fcpreq;
	spinlock_t reqlock;
	u16 status;
	u32 inistate;
	bool active;
	bool aborted;
	struct kref ref;
	struct work_struct fcp_rcv_work;
	struct work_struct abort_rcv_work;
	struct work_struct tio_done_work;
	struct nvmefc_tgt_fcp_req tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req *fcpreq;
	struct fcloop_fcpreq *tfcp_req;
	spinlock_t inilock;
};

static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}

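/* host transport queue hooks: fcloop has no real hardware queues */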
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
	}

	return 0;
}

static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}

static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_tport *tport = targetport->private;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		schedule_work(&tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));
	lsrsp->done(lsrsp);

	if (targetport) {
		tport = targetport->private;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		schedule_work(&tport->ls_work);
	}

	return 0;
}

static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

/*
 * Simulate reception of RSCN and converting it to a initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	schedule_work(&tgt_rscn->work);
}

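/*
 * The tgt-side FCP request context is reference counted: the initial
 * reference is dropped when the host "done" callback is made, and the
 * abort path takes/drops its own reference, so the context is freed
 * only after both sides are finished with it.
 */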
static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
}

/*
 * FCP IO operation done by initiator abort.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}

static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}

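/*
 * Copy payload between the target-side scatterlist and the initiator
 * I/O scatterlist, honoring the byte offset into the initiator buffer.
 */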
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from the
	 * initiator side). For the target side, act as if all is well
	 * (don't error out) and let the abort path clean up the exchange.
	 */
	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

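/*
 * Template callbacks invoked by the host/target transports once a
 * port unregistration has fully completed.
 */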
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	flush_work(&tport->ls_work);
	fcloop_nport_put(tport->nport);
}

#define FCLOOP_HW_QUEUES	4
#define FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete = fcloop_localport_delete,
	.remoteport_delete = fcloop_remoteport_delete,
	.create_queue = fcloop_create_queue,
	.delete_queue = fcloop_delete_queue,
	.ls_req = fcloop_h2t_ls_req,
	.fcp_io = fcloop_fcp_req,
	.ls_abort = fcloop_h2t_ls_abort,
	.fcp_abort = fcloop_fcp_abort,
	.xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues = FCLOOP_HW_QUEUES,
	.max_sgl_segments = FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
	.dma_boundary = FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz = sizeof(struct fcloop_lport_priv),
	.remote_priv_sz = sizeof(struct fcloop_rport),
	.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete = fcloop_targetport_delete,
	.xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp,
	.fcp_op = fcloop_fcp_op,
	.fcp_abort = fcloop_tgt_fcp_abort,
	.fcp_req_release = fcloop_fcp_req_release,
	.discovery_event = fcloop_tgt_discovery_evt,
	.ls_req = fcloop_t2h_ls_req,
	.ls_abort = fcloop_t2h_ls_abort,
	.host_release = fcloop_t2h_host_release,
	.max_hw_queues = FCLOOP_HW_QUEUES,
	.max_sgl_segments = FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
	.dma_boundary = FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct fcloop_tport),
	.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
};

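/*
 * The store handlers below back the ctl device sysfs attributes
 * (add_local_port, del_local_port, add_remote_port, ...); each parses
 * an option string such as "wwnn=<val>,wwpn=<val>".
 */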
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}

static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}

static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
	spin_lock_init(&tport->lock);
	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
	INIT_LIST_HEAD(&tport->ls_list);

	return count;
}

static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs = fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;

static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");