// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s" },
	{ NVMF_OPT_WWPN,	"wwpn=%s" },
	{ NVMF_OPT_ROLES,	"roles=%d" },
	{ NVMF_OPT_FCADDR,	"fcaddr=%x" },
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s" },
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s" },
	{ NVMF_OPT_ERR,		NULL }
};

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport *tport;
	struct nvmefc_ls_req *lsreq;
	struct work_struct work;
	struct nvmefc_tgt_ls_req tgt_ls_req;
	int status;
};

struct fcloop_rscn {
	struct fcloop_tport *tport;
	struct work_struct work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport *tport;
	struct nvmefc_fcp_req *fcpreq;
	spinlock_t reqlock;
	u16 status;
	u32 inistate;
	bool active;
	bool aborted;
	struct kref ref;
	struct work_struct fcp_rcv_work;
	struct work_struct abort_rcv_work;
	struct work_struct tio_done_work;
	struct nvmefc_tgt_fcp_req tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req *fcpreq;
	struct fcloop_fcpreq *tfcp_req;
	spinlock_t inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

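/* *********************** FC-NVME LS Handling **************************** */
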
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (!tport || tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		tls_req->tport = NULL;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

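/*
 * Simulate reception of RSCN and convert it to an initiator transport
 * call to rescan a remote port.
 */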
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	schedule_work(&tgt_rscn->work);
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

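	/* release original io reference on tgt struct */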
	fcloop_tfcp_req_put(tfcp_req);
}

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
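		/* remove reference taken in original abort downcall */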
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
}

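/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */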
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}

static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
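		/* illegal - call while i/o active */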
		return -EALREADY;

	if (unlikely(aborted)) {
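		/* target transport has aborted i/o prior */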
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

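	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */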
	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

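		/* Fall-Thru to RSP handling */
		/* FALLTHRU */
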
	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

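	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */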
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

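	/*
	 * nothing more to do here. io will be done on receiving the
	 * abort request from the host port.
	 */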
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
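		/* abort has already been called */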
		return;

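	/* break initiator/target relationship for io */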
	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (abortio)
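		/* leave the reference while the work item is scheduled */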
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
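		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. But release the reference taken by
		 * the abort routine
		 */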
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

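	/* release any threads waiting for the unreg to complete */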
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define FCLOOP_HW_QUEUES	4
#define FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
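	/* sizes of additional private data for data structures */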
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
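	/* optional features */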
	.target_features	= 0,
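	/* sizes of additional private data for data structures */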
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

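	/* everything there ? */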
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
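		/* success */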
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
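	/* free only if we're going to fail */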
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

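	/* everything there ? */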
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

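	/* success */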
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

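	/* success */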
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static const struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");