#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "../host/fc.h"

#define NVMET_LS_CTX_COUNT	256

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
        struct nvmefc_ls_rsp *lsrsp;
        struct nvmefc_tgt_fcp_req *fcpreq;

        struct list_head ls_rcv_list;

        struct nvmet_fc_tgtport *tgtport;
        struct nvmet_fc_tgt_assoc *assoc;
        void *hosthandle;

        union nvmefc_ls_requests *rqstbuf;
        union nvmefc_ls_responses *rspbuf;
        u16 rqstdatalen;
        dma_addr_t rspdma;

        struct scatterlist sg[2];

        struct work_struct work;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_ls_req_op {
        struct nvmefc_ls_req ls_req;

        struct nvmet_fc_tgtport *tgtport;
        void *hosthandle;

        int ls_error;
        struct list_head lsreq_list;
        bool req_queued;
};

#define NVMET_FC_MAX_SEQ_LENGTH	(256 * 1024)

enum nvmet_fcp_datadir {
        NVMET_FCP_NODATA,
        NVMET_FCP_WRITE,
        NVMET_FCP_READ,
        NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
        struct nvmefc_tgt_fcp_req *fcpreq;

        struct nvme_fc_cmd_iu cmdiubuf;
        struct nvme_fc_ersp_iu rspiubuf;
        dma_addr_t rspdma;
        struct scatterlist *next_sg;
        struct scatterlist *data_sg;
        int data_sg_cnt;
        u32 offset;
        enum nvmet_fcp_datadir io_dir;
        bool active;
        bool abort;
        bool aborted;
        bool writedataactive;
        spinlock_t flock;

        struct nvmet_req req;
        struct work_struct defer_work;

        struct nvmet_fc_tgtport *tgtport;
        struct nvmet_fc_tgt_queue *queue;

        struct list_head fcp_list;
};

struct nvmet_fc_tgtport {
        struct nvmet_fc_target_port fc_target_port;

        struct list_head tgt_list;
        struct device *dev;
        struct nvmet_fc_target_template *ops;

        struct nvmet_fc_ls_iod *iod;
        spinlock_t lock;
        struct list_head ls_rcv_list;
        struct list_head ls_req_list;
        struct list_head ls_busylist;
        struct list_head assoc_list;
        struct list_head host_list;
        struct ida assoc_cnt;
        struct nvmet_fc_port_entry *pe;
        struct kref ref;
        u32 max_sg_cnt;
};

struct nvmet_fc_port_entry {
        struct nvmet_fc_tgtport *tgtport;
        struct nvmet_port *port;
        u64 node_name;
        u64 port_name;
        struct list_head pe_list;
};

struct nvmet_fc_defer_fcp_req {
        struct list_head req_list;
        struct nvmefc_tgt_fcp_req *fcp_req;
};

struct nvmet_fc_tgt_queue {
        bool ninetypercent;
        u16 qid;
        u16 sqsize;
        u16 ersp_ratio;
        __le16 sqhd;
        atomic_t connected;
        atomic_t sqtail;
        atomic_t zrspcnt;
        atomic_t rsn;
        spinlock_t qlock;
        struct nvmet_cq nvme_cq;
        struct nvmet_sq nvme_sq;
        struct nvmet_fc_tgt_assoc *assoc;
        struct list_head fod_list;
        struct list_head pending_cmd_list;
        struct list_head avail_defer_list;
        struct workqueue_struct *work_q;
        struct kref ref;
        struct rcu_head rcu;
        struct nvmet_fc_fcp_iod fod[];
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_hostport {
        struct nvmet_fc_tgtport *tgtport;
        void *hosthandle;
        struct list_head host_list;
        struct kref ref;
        u8 invalid;
};

struct nvmet_fc_tgt_assoc {
        u64 association_id;
        u32 a_id;
        atomic_t terminating;
        struct nvmet_fc_tgtport *tgtport;
        struct nvmet_fc_hostport *hostport;
        struct nvmet_fc_ls_iod *rcv_disconn;
        struct list_head a_list;
        struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
        struct kref ref;
        struct work_struct del_work;
        struct rcu_head rcu;
};

static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
        return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
        return (fodptr - fodptr->queue->fod);
}

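/*
 * Connection ID layout: the 64-bit association ID is generated with its
 * low-order BYTES_FOR_QID bytes zero, and a connection ID is simply the
 * association ID OR'd with the SQ/queue ID.  The helpers below split a
 * connection ID back into its association ID and queue ID parts.
 */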
#define BYTES_FOR_QID		sizeof(u16)
#define BYTES_FOR_QID_SHIFT	(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK	((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
        return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
        return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
        return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
        return container_of(targetport, struct nvmet_fc_tgtport,
                        fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
        return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}

static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);

static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
                                        struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
                                struct nvmet_fc_ls_iod *iod);

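/*
 * The fc_dma_*() helpers below wrap the generic DMA API but tolerate a
 * NULL struct device: an LLDD without a DMA-capable device registers with
 * dev == NULL, in which case mapping is skipped and the buffers are used
 * by virtual address (see fc_map_sg()).
 */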
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
        return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_device(dev, addr, size, dir);
}

static int
fc_map_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        WARN_ON(nents == 0 || sg[0].length == 0);

        for_each_sg(sg, s, nents, i) {
                s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                s->dma_length = s->length;
#endif
        }
        return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
        return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_sg(dev, sg, nents, dir);
}
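
/*
 * LS requests originated by the target port (such as Disconnect
 * Association) are tracked on tgtport->ls_req_list.  The helpers below
 * map/unmap the request+response buffer around the LLDD's ->ls_req()
 * callout and unlink the op on completion.
 */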
static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
{
        struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        unsigned long flags;

        spin_lock_irqsave(&tgtport->lock, flags);

        if (!lsop->req_queued) {
                spin_unlock_irqrestore(&tgtport->lock, flags);
                return;
        }

        list_del(&lsop->lsreq_list);

        lsop->req_queued = false;

        spin_unlock_irqrestore(&tgtport->lock, flags);

        fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
                                (lsreq->rqstlen + lsreq->rsplen),
                                DMA_BIDIRECTIONAL);

        nvmet_fc_tgtport_put(tgtport);
}

static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        unsigned long flags;
        int ret = 0;

        if (!tgtport->ops->ls_req)
                return -EOPNOTSUPP;

        if (!nvmet_fc_tgtport_get(tgtport))
                return -ESHUTDOWN;

        lsreq->done = done;
        lsop->req_queued = false;
        INIT_LIST_HEAD(&lsop->lsreq_list);

        lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
                                  lsreq->rqstlen + lsreq->rsplen,
                                  DMA_BIDIRECTIONAL);
        if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
                ret = -EFAULT;
                goto out_puttgtport;
        }
        lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

        spin_lock_irqsave(&tgtport->lock, flags);

        list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);

        lsop->req_queued = true;

        spin_unlock_irqrestore(&tgtport->lock, flags);

        ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
                                   lsreq);
        if (ret)
                goto out_unlink;

        return 0;

out_unlink:
        lsop->ls_error = ret;
        spin_lock_irqsave(&tgtport->lock, flags);
        lsop->req_queued = false;
        list_del(&lsop->lsreq_list);
        spin_unlock_irqrestore(&tgtport->lock, flags);
        fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
                                (lsreq->rqstlen + lsreq->rsplen),
                                DMA_BIDIRECTIONAL);
out_puttgtport:
        nvmet_fc_tgtport_put(tgtport);

        return ret;
}

static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
        /* don't wait for completion */

        return __nvmet_fc_send_ls_req(tgtport, lsop, done);
}

static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
        struct nvmet_fc_ls_req_op *lsop =
                container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);

        __nvmet_fc_finish_ls_req(lsop);

        /* the completion status of the LS is ignored; just free the op */

        kfree(lsop);
}
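
/*
 * nvmet_fc_xmt_disconnect_assoc() sends an FC-NVME Disconnect Association
 * LS to the host on a best-effort basis: it silently returns if the LLDD
 * has no ->ls_req() handler or the hostport is gone/invalidated, and only
 * logs if allocation or transmit fails.  On success the LS op is freed
 * from the completion handler (nvmet_fc_disconnect_assoc_done).
 */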
476static void
477nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
478{
479 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
480 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
481 struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
482 struct nvmet_fc_ls_req_op *lsop;
483 struct nvmefc_ls_req *lsreq;
484 int ret;
485
486
487
488
489
490
491 if (!tgtport->ops->ls_req || !assoc->hostport ||
492 assoc->hostport->invalid)
493 return;
494
495 lsop = kzalloc((sizeof(*lsop) +
496 sizeof(*discon_rqst) + sizeof(*discon_acc) +
497 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
498 if (!lsop) {
499 dev_info(tgtport->dev,
500 "{%d:%d} send Disconnect Association failed: ENOMEM\n",
501 tgtport->fc_target_port.port_num, assoc->a_id);
502 return;
503 }
504
505 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
506 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
507 lsreq = &lsop->ls_req;
508 if (tgtport->ops->lsrqst_priv_sz)
509 lsreq->private = (void *)&discon_acc[1];
510 else
511 lsreq->private = NULL;
512
513 lsop->tgtport = tgtport;
514 lsop->hosthandle = assoc->hostport->hosthandle;
515
516 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
517 assoc->association_id);
518
519 ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
520 nvmet_fc_disconnect_assoc_done);
521 if (ret) {
522 dev_info(tgtport->dev,
523 "{%d:%d} XMT Disconnect Association failed: %d\n",
524 tgtport->fc_target_port.port_num, assoc->a_id, ret);
525 kfree(lsop);
526 }
527}
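
/*
 * LS context (nvmet_fc_ls_iod) pool: each target port pre-allocates
 * NVMET_LS_CTX_COUNT contexts with DMA-mapped response buffers.  Received
 * LS requests borrow one from tgtport->ls_rcv_list and return it once the
 * response has been transmitted.
 */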
533static int
534nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
535{
536 struct nvmet_fc_ls_iod *iod;
537 int i;
538
539 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
540 GFP_KERNEL);
541 if (!iod)
542 return -ENOMEM;
543
544 tgtport->iod = iod;
545
546 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
547 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
548 iod->tgtport = tgtport;
549 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
550
551 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
552 sizeof(union nvmefc_ls_responses),
553 GFP_KERNEL);
554 if (!iod->rqstbuf)
555 goto out_fail;
556
557 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
558
559 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
560 sizeof(*iod->rspbuf),
561 DMA_TO_DEVICE);
562 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
563 goto out_fail;
564 }
565
566 return 0;
567
568out_fail:
569 kfree(iod->rqstbuf);
570 list_del(&iod->ls_rcv_list);
571 for (iod--, i--; i >= 0; iod--, i--) {
572 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
573 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
574 kfree(iod->rqstbuf);
575 list_del(&iod->ls_rcv_list);
576 }
577
578 kfree(iod);
579
580 return -EFAULT;
581}
582
583static void
584nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
585{
586 struct nvmet_fc_ls_iod *iod = tgtport->iod;
587 int i;
588
589 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
590 fc_dma_unmap_single(tgtport->dev,
591 iod->rspdma, sizeof(*iod->rspbuf),
592 DMA_TO_DEVICE);
593 kfree(iod->rqstbuf);
594 list_del(&iod->ls_rcv_list);
595 }
596 kfree(tgtport->iod);
597}
598
599static struct nvmet_fc_ls_iod *
600nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
601{
602 struct nvmet_fc_ls_iod *iod;
603 unsigned long flags;
604
605 spin_lock_irqsave(&tgtport->lock, flags);
606 iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
607 struct nvmet_fc_ls_iod, ls_rcv_list);
608 if (iod)
609 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
610 spin_unlock_irqrestore(&tgtport->lock, flags);
611 return iod;
612}
613
614
615static void
616nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
617 struct nvmet_fc_ls_iod *iod)
618{
619 unsigned long flags;
620
621 spin_lock_irqsave(&tgtport->lock, flags);
622 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
623 spin_unlock_irqrestore(&tgtport->lock, flags);
624}
625
626static void
627nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
628 struct nvmet_fc_tgt_queue *queue)
629{
630 struct nvmet_fc_fcp_iod *fod = queue->fod;
631 int i;
632
633 for (i = 0; i < queue->sqsize; fod++, i++) {
634 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
635 fod->tgtport = tgtport;
636 fod->queue = queue;
637 fod->active = false;
638 fod->abort = false;
639 fod->aborted = false;
640 fod->fcpreq = NULL;
641 list_add_tail(&fod->fcp_list, &queue->fod_list);
642 spin_lock_init(&fod->flock);
643
644 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
645 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
646 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
647 list_del(&fod->fcp_list);
648 for (fod--, i--; i >= 0; fod--, i--) {
649 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
650 sizeof(fod->rspiubuf),
651 DMA_TO_DEVICE);
652 fod->rspdma = 0L;
653 list_del(&fod->fcp_list);
654 }
655
656 return;
657 }
658 }
659}
660
661static void
662nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
663 struct nvmet_fc_tgt_queue *queue)
664{
665 struct nvmet_fc_fcp_iod *fod = queue->fod;
666 int i;
667
668 for (i = 0; i < queue->sqsize; fod++, i++) {
669 if (fod->rspdma)
670 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
671 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
672 }
673}
674
675static struct nvmet_fc_fcp_iod *
676nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
677{
678 struct nvmet_fc_fcp_iod *fod;
679
680 lockdep_assert_held(&queue->qlock);
681
682 fod = list_first_entry_or_null(&queue->fod_list,
683 struct nvmet_fc_fcp_iod, fcp_list);
684 if (fod) {
685 list_del(&fod->fcp_list);
686 fod->active = true;
687
688
689
690
691
692 }
693 return fod;
694}
695
696
697static void
698nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
699 struct nvmet_fc_tgt_queue *queue,
700 struct nvmefc_tgt_fcp_req *fcpreq)
701{
702 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
703
704
705
706
707
708 fcpreq->hwqid = queue->qid ?
709 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
710
711 nvmet_fc_handle_fcp_rqst(tgtport, fod);
712}
713
714static void
715nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
716{
717 struct nvmet_fc_fcp_iod *fod =
718 container_of(work, struct nvmet_fc_fcp_iod, defer_work);
719
720
721 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
722
723}
724
725static void
726nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
727 struct nvmet_fc_fcp_iod *fod)
728{
729 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
730 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
731 struct nvmet_fc_defer_fcp_req *deferfcp;
732 unsigned long flags;
733
734 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
735 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
736
737 fcpreq->nvmet_fc_private = NULL;
738
739 fod->active = false;
740 fod->abort = false;
741 fod->aborted = false;
742 fod->writedataactive = false;
743 fod->fcpreq = NULL;
744
745 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
746
747
748 nvmet_fc_tgt_q_put(queue);
749
750 spin_lock_irqsave(&queue->qlock, flags);
751 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
752 struct nvmet_fc_defer_fcp_req, req_list);
753 if (!deferfcp) {
754 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
755 spin_unlock_irqrestore(&queue->qlock, flags);
756 return;
757 }
758
759
760 list_del(&deferfcp->req_list);
761
762 fcpreq = deferfcp->fcp_req;
763
764
765 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
766
767 spin_unlock_irqrestore(&queue->qlock, flags);
768
769
770 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
771
772
773 fcpreq->rspaddr = NULL;
774 fcpreq->rsplen = 0;
775 fcpreq->nvmet_fc_private = fod;
776 fod->fcpreq = fcpreq;
777 fod->active = true;
778
779
780 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
781
782
783
784
785
786
787 queue_work(queue->work_q, &fod->defer_work);
788}
789
790static struct nvmet_fc_tgt_queue *
791nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
792 u16 qid, u16 sqsize)
793{
794 struct nvmet_fc_tgt_queue *queue;
795 int ret;
796
797 if (qid > NVMET_NR_QUEUES)
798 return NULL;
799
800 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
801 if (!queue)
802 return NULL;
803
804 if (!nvmet_fc_tgt_a_get(assoc))
805 goto out_free_queue;
806
807 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
808 assoc->tgtport->fc_target_port.port_num,
809 assoc->a_id, qid);
810 if (!queue->work_q)
811 goto out_a_put;
812
813 queue->qid = qid;
814 queue->sqsize = sqsize;
815 queue->assoc = assoc;
816 INIT_LIST_HEAD(&queue->fod_list);
817 INIT_LIST_HEAD(&queue->avail_defer_list);
818 INIT_LIST_HEAD(&queue->pending_cmd_list);
819 atomic_set(&queue->connected, 0);
820 atomic_set(&queue->sqtail, 0);
821 atomic_set(&queue->rsn, 1);
822 atomic_set(&queue->zrspcnt, 0);
823 spin_lock_init(&queue->qlock);
824 kref_init(&queue->ref);
825
826 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
827
828 ret = nvmet_sq_init(&queue->nvme_sq);
829 if (ret)
830 goto out_fail_iodlist;
831
832 WARN_ON(assoc->queues[qid]);
833 rcu_assign_pointer(assoc->queues[qid], queue);
834
835 return queue;
836
837out_fail_iodlist:
838 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
839 destroy_workqueue(queue->work_q);
840out_a_put:
841 nvmet_fc_tgt_a_put(assoc);
842out_free_queue:
843 kfree(queue);
844 return NULL;
845}
846
847
848static void
849nvmet_fc_tgt_queue_free(struct kref *ref)
850{
851 struct nvmet_fc_tgt_queue *queue =
852 container_of(ref, struct nvmet_fc_tgt_queue, ref);
853
854 rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
855
856 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
857
858 nvmet_fc_tgt_a_put(queue->assoc);
859
860 destroy_workqueue(queue->work_q);
861
862 kfree_rcu(queue, rcu);
863}
864
865static void
866nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
867{
868 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
869}
870
871static int
872nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
873{
874 return kref_get_unless_zero(&queue->ref);
875}
876
877
878static void
879nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
880{
881 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
882 struct nvmet_fc_fcp_iod *fod = queue->fod;
883 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
884 unsigned long flags;
885 int i;
886 bool disconnect;
887
888 disconnect = atomic_xchg(&queue->connected, 0);
889
890
891 if (!disconnect)
892 return;
893
894 spin_lock_irqsave(&queue->qlock, flags);
895
896 for (i = 0; i < queue->sqsize; fod++, i++) {
897 if (fod->active) {
898 spin_lock(&fod->flock);
899 fod->abort = true;
900
901
902
903
904
905 if (fod->writedataactive) {
906 fod->aborted = true;
907 spin_unlock(&fod->flock);
908 tgtport->ops->fcp_abort(
909 &tgtport->fc_target_port, fod->fcpreq);
910 } else
911 spin_unlock(&fod->flock);
912 }
913 }
914
915
916 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
917 req_list) {
918 list_del(&deferfcp->req_list);
919 kfree(deferfcp);
920 }
921
922 for (;;) {
923 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
924 struct nvmet_fc_defer_fcp_req, req_list);
925 if (!deferfcp)
926 break;
927
928 list_del(&deferfcp->req_list);
929 spin_unlock_irqrestore(&queue->qlock, flags);
930
931 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
932 deferfcp->fcp_req);
933
934 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
935 deferfcp->fcp_req);
936
937 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
938 deferfcp->fcp_req);
939
940
941 nvmet_fc_tgt_q_put(queue);
942
943 kfree(deferfcp);
944
945 spin_lock_irqsave(&queue->qlock, flags);
946 }
947 spin_unlock_irqrestore(&queue->qlock, flags);
948
949 flush_workqueue(queue->work_q);
950
951 nvmet_sq_destroy(&queue->nvme_sq);
952
953 nvmet_fc_tgt_q_put(queue);
954}
955
956static struct nvmet_fc_tgt_queue *
957nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
958 u64 connection_id)
959{
960 struct nvmet_fc_tgt_assoc *assoc;
961 struct nvmet_fc_tgt_queue *queue;
962 u64 association_id = nvmet_fc_getassociationid(connection_id);
963 u16 qid = nvmet_fc_getqueueid(connection_id);
964
965 if (qid > NVMET_NR_QUEUES)
966 return NULL;
967
968 rcu_read_lock();
969 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
970 if (association_id == assoc->association_id) {
971 queue = rcu_dereference(assoc->queues[qid]);
972 if (queue &&
973 (!atomic_read(&queue->connected) ||
974 !nvmet_fc_tgt_q_get(queue)))
975 queue = NULL;
976 rcu_read_unlock();
977 return queue;
978 }
979 }
980 rcu_read_unlock();
981 return NULL;
982}
983
984static void
985nvmet_fc_hostport_free(struct kref *ref)
986{
987 struct nvmet_fc_hostport *hostport =
988 container_of(ref, struct nvmet_fc_hostport, ref);
989 struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
990 unsigned long flags;
991
992 spin_lock_irqsave(&tgtport->lock, flags);
993 list_del(&hostport->host_list);
994 spin_unlock_irqrestore(&tgtport->lock, flags);
995 if (tgtport->ops->host_release && hostport->invalid)
996 tgtport->ops->host_release(hostport->hosthandle);
997 kfree(hostport);
998 nvmet_fc_tgtport_put(tgtport);
999}
1000
1001static void
1002nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
1003{
1004 kref_put(&hostport->ref, nvmet_fc_hostport_free);
1005}
1006
1007static int
1008nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
1009{
1010 return kref_get_unless_zero(&hostport->ref);
1011}
1012
1013static void
1014nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
1015{
1016
1017 if (!hostport || !hostport->hosthandle)
1018 return;
1019
1020 nvmet_fc_hostport_put(hostport);
1021}
1022
1023static struct nvmet_fc_hostport *
1024nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1025{
1026 struct nvmet_fc_hostport *host;
1027
1028 lockdep_assert_held(&tgtport->lock);
1029
1030 list_for_each_entry(host, &tgtport->host_list, host_list) {
1031 if (host->hosthandle == hosthandle && !host->invalid) {
1032 if (nvmet_fc_hostport_get(host))
1033 return (host);
1034 }
1035 }
1036
1037 return NULL;
1038}
1039
1040static struct nvmet_fc_hostport *
1041nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1042{
1043 struct nvmet_fc_hostport *newhost, *match = NULL;
1044 unsigned long flags;
1045
1046
1047 if (!hosthandle)
1048 return NULL;
1049
1050
1051
1052
1053
1054 if (!nvmet_fc_tgtport_get(tgtport))
1055 return ERR_PTR(-EINVAL);
1056
1057 spin_lock_irqsave(&tgtport->lock, flags);
1058 match = nvmet_fc_match_hostport(tgtport, hosthandle);
1059 spin_unlock_irqrestore(&tgtport->lock, flags);
1060
1061 if (match) {
1062
1063 nvmet_fc_tgtport_put(tgtport);
1064 return match;
1065 }
1066
1067 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
1068 if (!newhost) {
1069
1070 nvmet_fc_tgtport_put(tgtport);
1071 return ERR_PTR(-ENOMEM);
1072 }
1073
1074 spin_lock_irqsave(&tgtport->lock, flags);
1075 match = nvmet_fc_match_hostport(tgtport, hosthandle);
1076 if (match) {
1077
1078 kfree(newhost);
1079 newhost = match;
1080
1081 nvmet_fc_tgtport_put(tgtport);
1082 } else {
1083 newhost->tgtport = tgtport;
1084 newhost->hosthandle = hosthandle;
1085 INIT_LIST_HEAD(&newhost->host_list);
1086 kref_init(&newhost->ref);
1087
1088 list_add_tail(&newhost->host_list, &tgtport->host_list);
1089 }
1090 spin_unlock_irqrestore(&tgtport->lock, flags);
1091
1092 return newhost;
1093}
1094
1095static void
1096nvmet_fc_delete_assoc(struct work_struct *work)
1097{
1098 struct nvmet_fc_tgt_assoc *assoc =
1099 container_of(work, struct nvmet_fc_tgt_assoc, del_work);
1100
1101 nvmet_fc_delete_target_assoc(assoc);
1102 nvmet_fc_tgt_a_put(assoc);
1103}
1104
1105static struct nvmet_fc_tgt_assoc *
1106nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1107{
1108 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
1109 unsigned long flags;
1110 u64 ran;
1111 int idx;
1112 bool needrandom = true;
1113
1114 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
1115 if (!assoc)
1116 return NULL;
1117
1118 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
1119 if (idx < 0)
1120 goto out_free_assoc;
1121
1122 if (!nvmet_fc_tgtport_get(tgtport))
1123 goto out_ida;
1124
1125 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
1126 if (IS_ERR(assoc->hostport))
1127 goto out_put;
1128
1129 assoc->tgtport = tgtport;
1130 assoc->a_id = idx;
1131 INIT_LIST_HEAD(&assoc->a_list);
1132 kref_init(&assoc->ref);
1133 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
1134 atomic_set(&assoc->terminating, 0);
1135
1136 while (needrandom) {
1137 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
1138 ran = ran << BYTES_FOR_QID_SHIFT;
1139
1140 spin_lock_irqsave(&tgtport->lock, flags);
1141 needrandom = false;
1142 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
1143 if (ran == tmpassoc->association_id) {
1144 needrandom = true;
1145 break;
1146 }
1147 }
1148 if (!needrandom) {
1149 assoc->association_id = ran;
1150 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
1151 }
1152 spin_unlock_irqrestore(&tgtport->lock, flags);
1153 }
1154
1155 return assoc;
1156
1157out_put:
1158 nvmet_fc_tgtport_put(tgtport);
1159out_ida:
1160 ida_simple_remove(&tgtport->assoc_cnt, idx);
1161out_free_assoc:
1162 kfree(assoc);
1163 return NULL;
1164}
1165
1166static void
1167nvmet_fc_target_assoc_free(struct kref *ref)
1168{
1169 struct nvmet_fc_tgt_assoc *assoc =
1170 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
1171 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1172 struct nvmet_fc_ls_iod *oldls;
1173 unsigned long flags;
1174
1175
1176 nvmet_fc_xmt_disconnect_assoc(assoc);
1177
1178 nvmet_fc_free_hostport(assoc->hostport);
1179 spin_lock_irqsave(&tgtport->lock, flags);
1180 list_del_rcu(&assoc->a_list);
1181 oldls = assoc->rcv_disconn;
1182 spin_unlock_irqrestore(&tgtport->lock, flags);
1183
1184 if (oldls)
1185 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1186 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
1187 dev_info(tgtport->dev,
1188 "{%d:%d} Association freed\n",
1189 tgtport->fc_target_port.port_num, assoc->a_id);
1190 kfree_rcu(assoc, rcu);
1191 nvmet_fc_tgtport_put(tgtport);
1192}
1193
1194static void
1195nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
1196{
1197 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
1198}
1199
1200static int
1201nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
1202{
1203 return kref_get_unless_zero(&assoc->ref);
1204}
1205
1206static void
1207nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
1208{
1209 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1210 struct nvmet_fc_tgt_queue *queue;
1211 int i, terminating;
1212
1213 terminating = atomic_xchg(&assoc->terminating, 1);
1214
1215
1216 if (terminating)
1217 return;
1218
1219
1220 for (i = NVMET_NR_QUEUES; i >= 0; i--) {
1221 rcu_read_lock();
1222 queue = rcu_dereference(assoc->queues[i]);
1223 if (!queue) {
1224 rcu_read_unlock();
1225 continue;
1226 }
1227
1228 if (!nvmet_fc_tgt_q_get(queue)) {
1229 rcu_read_unlock();
1230 continue;
1231 }
1232 rcu_read_unlock();
1233 nvmet_fc_delete_target_queue(queue);
1234 nvmet_fc_tgt_q_put(queue);
1235 }
1236
1237 dev_info(tgtport->dev,
1238 "{%d:%d} Association deleted\n",
1239 tgtport->fc_target_port.port_num, assoc->a_id);
1240
1241 nvmet_fc_tgt_a_put(assoc);
1242}
1243
1244static struct nvmet_fc_tgt_assoc *
1245nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
1246 u64 association_id)
1247{
1248 struct nvmet_fc_tgt_assoc *assoc;
1249 struct nvmet_fc_tgt_assoc *ret = NULL;
1250
1251 rcu_read_lock();
1252 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1253 if (association_id == assoc->association_id) {
1254 ret = assoc;
1255 if (!nvmet_fc_tgt_a_get(assoc))
1256 ret = NULL;
1257 break;
1258 }
1259 }
1260 rcu_read_unlock();
1261
1262 return ret;
1263}
1264
1265static void
1266nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
1267 struct nvmet_fc_port_entry *pe,
1268 struct nvmet_port *port)
1269{
1270 lockdep_assert_held(&nvmet_fc_tgtlock);
1271
1272 pe->tgtport = tgtport;
1273 tgtport->pe = pe;
1274
1275 pe->port = port;
1276 port->priv = pe;
1277
1278 pe->node_name = tgtport->fc_target_port.node_name;
1279 pe->port_name = tgtport->fc_target_port.port_name;
1280 INIT_LIST_HEAD(&pe->pe_list);
1281
1282 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
1283}
1284
1285static void
1286nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
1287{
1288 unsigned long flags;
1289
1290 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1291 if (pe->tgtport)
1292 pe->tgtport->pe = NULL;
1293 list_del(&pe->pe_list);
1294 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1295}
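
/*
 * nvmet_fc_portentry_unbind_tgt() detaches a targetport from its port
 * entry when the targetport is being removed; the port entry itself stays
 * on nvmet_fc_portentry_list so a later targetport registration with the
 * same WWNN/WWPN can rebind to it.
 */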
1302static void
1303nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
1304{
1305 struct nvmet_fc_port_entry *pe;
1306 unsigned long flags;
1307
1308 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1309 pe = tgtport->pe;
1310 if (pe)
1311 pe->tgtport = NULL;
1312 tgtport->pe = NULL;
1313 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1314}
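
/*
 * nvmet_fc_portentry_rebind_tgt() is called at targetport registration to
 * reattach any existing port entry whose node_name/port_name match the
 * new targetport, restoring the binding across an unregister/register
 * cycle of the same physical port.
 */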
1324static void
1325nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
1326{
1327 struct nvmet_fc_port_entry *pe;
1328 unsigned long flags;
1329
1330 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1331 list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
1332 if (tgtport->fc_target_port.node_name == pe->node_name &&
1333 tgtport->fc_target_port.port_name == pe->port_name) {
1334 WARN_ON(pe->tgtport);
1335 tgtport->pe = pe;
1336 pe->tgtport = tgtport;
1337 break;
1338 }
1339 }
1340 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1341}
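
/*
 * nvmet_fc_register_targetport - register an FC-NVME subsystem target
 * port with the transport.  Validates the mandatory template callouts,
 * allocates the tgtport plus the LLDD-private area, assigns a port_num
 * from nvmet_fc_tgtport_cnt, sets up the LS context pool, rebinds any
 * matching port entry, and links the port on nvmet_fc_target_list.
 * Returns 0 and sets *portptr on success.
 *
 * Illustrative call from an LLDD (local names here are hypothetical):
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = lport_wwnn,
 *		.port_name = lport_wwpn,
 *		.port_id   = lport_did,
 *	};
 *	struct nvmet_fc_target_port *tport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
 *					   &pdev->dev, &tport);
 */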
1360int
1361nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
1362 struct nvmet_fc_target_template *template,
1363 struct device *dev,
1364 struct nvmet_fc_target_port **portptr)
1365{
1366 struct nvmet_fc_tgtport *newrec;
1367 unsigned long flags;
1368 int ret, idx;
1369
1370 if (!template->xmt_ls_rsp || !template->fcp_op ||
1371 !template->fcp_abort ||
1372 !template->fcp_req_release || !template->targetport_delete ||
1373 !template->max_hw_queues || !template->max_sgl_segments ||
1374 !template->max_dif_sgl_segments || !template->dma_boundary) {
1375 ret = -EINVAL;
1376 goto out_regtgt_failed;
1377 }
1378
1379 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
1380 GFP_KERNEL);
1381 if (!newrec) {
1382 ret = -ENOMEM;
1383 goto out_regtgt_failed;
1384 }
1385
1386 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
1387 if (idx < 0) {
1388 ret = -ENOSPC;
1389 goto out_fail_kfree;
1390 }
1391
1392 if (!get_device(dev) && dev) {
1393 ret = -ENODEV;
1394 goto out_ida_put;
1395 }
1396
1397 newrec->fc_target_port.node_name = pinfo->node_name;
1398 newrec->fc_target_port.port_name = pinfo->port_name;
1399 if (template->target_priv_sz)
1400 newrec->fc_target_port.private = &newrec[1];
1401 else
1402 newrec->fc_target_port.private = NULL;
1403 newrec->fc_target_port.port_id = pinfo->port_id;
1404 newrec->fc_target_port.port_num = idx;
1405 INIT_LIST_HEAD(&newrec->tgt_list);
1406 newrec->dev = dev;
1407 newrec->ops = template;
1408 spin_lock_init(&newrec->lock);
1409 INIT_LIST_HEAD(&newrec->ls_rcv_list);
1410 INIT_LIST_HEAD(&newrec->ls_req_list);
1411 INIT_LIST_HEAD(&newrec->ls_busylist);
1412 INIT_LIST_HEAD(&newrec->assoc_list);
1413 INIT_LIST_HEAD(&newrec->host_list);
1414 kref_init(&newrec->ref);
1415 ida_init(&newrec->assoc_cnt);
1416 newrec->max_sg_cnt = template->max_sgl_segments;
1417
1418 ret = nvmet_fc_alloc_ls_iodlist(newrec);
1419 if (ret) {
1420 ret = -ENOMEM;
1421 goto out_free_newrec;
1422 }
1423
1424 nvmet_fc_portentry_rebind_tgt(newrec);
1425
1426 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1427 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1428 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1429
1430 *portptr = &newrec->fc_target_port;
1431 return 0;
1432
1433out_free_newrec:
1434 put_device(dev);
1435out_ida_put:
1436 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
1437out_fail_kfree:
1438 kfree(newrec);
1439out_regtgt_failed:
1440 *portptr = NULL;
1441 return ret;
1442}
1443EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
1444
1445
1446static void
1447nvmet_fc_free_tgtport(struct kref *ref)
1448{
1449 struct nvmet_fc_tgtport *tgtport =
1450 container_of(ref, struct nvmet_fc_tgtport, ref);
1451 struct device *dev = tgtport->dev;
1452 unsigned long flags;
1453
1454 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1455 list_del(&tgtport->tgt_list);
1456 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1457
1458 nvmet_fc_free_ls_iodlist(tgtport);
1459
1460
1461 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1462
1463 ida_simple_remove(&nvmet_fc_tgtport_cnt,
1464 tgtport->fc_target_port.port_num);
1465
1466 ida_destroy(&tgtport->assoc_cnt);
1467
1468 kfree(tgtport);
1469
1470 put_device(dev);
1471}
1472
1473static void
1474nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1475{
1476 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1477}
1478
1479static int
1480nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1481{
1482 return kref_get_unless_zero(&tgtport->ref);
1483}
1484
1485static void
1486__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1487{
1488 struct nvmet_fc_tgt_assoc *assoc;
1489
1490 rcu_read_lock();
1491 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1492 if (!nvmet_fc_tgt_a_get(assoc))
1493 continue;
1494 if (!schedule_work(&assoc->del_work))
1495
1496 nvmet_fc_tgt_a_put(assoc);
1497 }
1498 rcu_read_unlock();
1499}
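
/*
 * nvmet_fc_invalidate_host - called by an LLDD when it knows the host
 * port identified by hosthandle has gone away.  Marks the matching
 * hostport invalid and schedules deletion of every association from that
 * host.  If no association referenced the hosthandle, the LLDD's
 * ->host_release() is invoked directly so the handle can be retired.
 */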
1530void
1531nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
1532 void *hosthandle)
1533{
1534 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1535 struct nvmet_fc_tgt_assoc *assoc, *next;
1536 unsigned long flags;
1537 bool noassoc = true;
1538
1539 spin_lock_irqsave(&tgtport->lock, flags);
1540 list_for_each_entry_safe(assoc, next,
1541 &tgtport->assoc_list, a_list) {
1542 if (!assoc->hostport ||
1543 assoc->hostport->hosthandle != hosthandle)
1544 continue;
1545 if (!nvmet_fc_tgt_a_get(assoc))
1546 continue;
1547 assoc->hostport->invalid = 1;
1548 noassoc = false;
1549 if (!schedule_work(&assoc->del_work))
1550
1551 nvmet_fc_tgt_a_put(assoc);
1552 }
1553 spin_unlock_irqrestore(&tgtport->lock, flags);
1554
1555
1556 if (noassoc && tgtport->ops->host_release)
1557 tgtport->ops->host_release(hosthandle);
1558}
1559EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
1560
1561
1562
1563
1564static void
1565nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1566{
1567 struct nvmet_fc_tgtport *tgtport, *next;
1568 struct nvmet_fc_tgt_assoc *assoc;
1569 struct nvmet_fc_tgt_queue *queue;
1570 unsigned long flags;
1571 bool found_ctrl = false;
1572
1573
1574 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1575 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1576 tgt_list) {
1577 if (!nvmet_fc_tgtport_get(tgtport))
1578 continue;
1579 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1580
1581 rcu_read_lock();
1582 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1583 queue = rcu_dereference(assoc->queues[0]);
1584 if (queue && queue->nvme_sq.ctrl == ctrl) {
1585 if (nvmet_fc_tgt_a_get(assoc))
1586 found_ctrl = true;
1587 break;
1588 }
1589 }
1590 rcu_read_unlock();
1591
1592 nvmet_fc_tgtport_put(tgtport);
1593
1594 if (found_ctrl) {
1595 if (!schedule_work(&assoc->del_work))
1596
1597 nvmet_fc_tgt_a_put(assoc);
1598 return;
1599 }
1600
1601 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1602 }
1603 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1604}
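
/*
 * nvmet_fc_unregister_targetport - called by an LLDD to tear down a
 * previously registered target port.  Unbinds the port entry, schedules
 * deletion of all associations, and drops the registration reference;
 * the tgtport (and the LLDD's ->targetport_delete() callback) is only
 * released once every outstanding reference has been put.
 */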
1617int
1618nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1619{
1620 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1621
1622 nvmet_fc_portentry_unbind_tgt(tgtport);
1623
1624
1625 __nvmet_fc_free_assocs(tgtport);
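
        /*
         * Association deletion (above) is asynchronous; the final
         * nvmet_fc_tgtport_put() below frees the tgtport only after the
         * association teardown work has dropped its references.
         */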
1634 nvmet_fc_tgtport_put(tgtport);
1635
1636 return 0;
1637}
1638EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1639
1640
1641
1642
1643
1644static void
1645nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1646 struct nvmet_fc_ls_iod *iod)
1647{
1648 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
1649 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
1650 struct nvmet_fc_tgt_queue *queue;
1651 int ret = 0;
1652
1653 memset(acc, 0, sizeof(*acc));
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1664 ret = VERR_CR_ASSOC_LEN;
1665 else if (be32_to_cpu(rqst->desc_list_len) <
1666 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1667 ret = VERR_CR_ASSOC_RQST_LEN;
1668 else if (rqst->assoc_cmd.desc_tag !=
1669 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1670 ret = VERR_CR_ASSOC_CMD;
1671 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1672 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1673 ret = VERR_CR_ASSOC_CMD_LEN;
1674 else if (!rqst->assoc_cmd.ersp_ratio ||
1675 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1676 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1677 ret = VERR_ERSP_RATIO;
1678
1679 else {
1680
1681 iod->assoc = nvmet_fc_alloc_target_assoc(
1682 tgtport, iod->hosthandle);
1683 if (!iod->assoc)
1684 ret = VERR_ASSOC_ALLOC_FAIL;
1685 else {
1686 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1687 be16_to_cpu(rqst->assoc_cmd.sqsize));
1688 if (!queue)
1689 ret = VERR_QUEUE_ALLOC_FAIL;
1690 }
1691 }
1692
1693 if (ret) {
1694 dev_err(tgtport->dev,
1695 "Create Association LS failed: %s\n",
1696 validation_errors[ret]);
1697 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1698 sizeof(*acc), rqst->w0.ls_cmd,
1699 FCNVME_RJT_RC_LOGIC,
1700 FCNVME_RJT_EXP_NONE, 0);
1701 return;
1702 }
1703
1704 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1705 atomic_set(&queue->connected, 1);
1706 queue->sqhd = 0;
1707
1708 dev_info(tgtport->dev,
1709 "{%d:%d} Association created\n",
1710 tgtport->fc_target_port.port_num, iod->assoc->a_id);
1711
1712
1713
1714 iod->lsrsp->rsplen = sizeof(*acc);
1715
1716 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1717 fcnvme_lsdesc_len(
1718 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1719 FCNVME_LS_CREATE_ASSOCIATION);
1720 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1721 acc->associd.desc_len =
1722 fcnvme_lsdesc_len(
1723 sizeof(struct fcnvme_lsdesc_assoc_id));
1724 acc->associd.association_id =
1725 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1726 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1727 acc->connectid.desc_len =
1728 fcnvme_lsdesc_len(
1729 sizeof(struct fcnvme_lsdesc_conn_id));
1730 acc->connectid.connection_id = acc->associd.association_id;
1731}
1732
1733static void
1734nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1735 struct nvmet_fc_ls_iod *iod)
1736{
1737 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
1738 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
1739 struct nvmet_fc_tgt_queue *queue;
1740 int ret = 0;
1741
1742 memset(acc, 0, sizeof(*acc));
1743
1744 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1745 ret = VERR_CR_CONN_LEN;
1746 else if (rqst->desc_list_len !=
1747 fcnvme_lsdesc_len(
1748 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1749 ret = VERR_CR_CONN_RQST_LEN;
1750 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1751 ret = VERR_ASSOC_ID;
1752 else if (rqst->associd.desc_len !=
1753 fcnvme_lsdesc_len(
1754 sizeof(struct fcnvme_lsdesc_assoc_id)))
1755 ret = VERR_ASSOC_ID_LEN;
1756 else if (rqst->connect_cmd.desc_tag !=
1757 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1758 ret = VERR_CR_CONN_CMD;
1759 else if (rqst->connect_cmd.desc_len !=
1760 fcnvme_lsdesc_len(
1761 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1762 ret = VERR_CR_CONN_CMD_LEN;
1763 else if (!rqst->connect_cmd.ersp_ratio ||
1764 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1765 be16_to_cpu(rqst->connect_cmd.sqsize)))
1766 ret = VERR_ERSP_RATIO;
1767
1768 else {
1769
1770 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1771 be64_to_cpu(rqst->associd.association_id));
1772 if (!iod->assoc)
1773 ret = VERR_NO_ASSOC;
1774 else {
1775 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1776 be16_to_cpu(rqst->connect_cmd.qid),
1777 be16_to_cpu(rqst->connect_cmd.sqsize));
1778 if (!queue)
1779 ret = VERR_QUEUE_ALLOC_FAIL;
1780
1781
1782 nvmet_fc_tgt_a_put(iod->assoc);
1783 }
1784 }
1785
1786 if (ret) {
1787 dev_err(tgtport->dev,
1788 "Create Connection LS failed: %s\n",
1789 validation_errors[ret]);
1790 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1791 sizeof(*acc), rqst->w0.ls_cmd,
1792 (ret == VERR_NO_ASSOC) ?
1793 FCNVME_RJT_RC_INV_ASSOC :
1794 FCNVME_RJT_RC_LOGIC,
1795 FCNVME_RJT_EXP_NONE, 0);
1796 return;
1797 }
1798
1799 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1800 atomic_set(&queue->connected, 1);
1801 queue->sqhd = 0;
1802
1803
1804
1805 iod->lsrsp->rsplen = sizeof(*acc);
1806
1807 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1808 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1809 FCNVME_LS_CREATE_CONNECTION);
1810 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1811 acc->connectid.desc_len =
1812 fcnvme_lsdesc_len(
1813 sizeof(struct fcnvme_lsdesc_conn_id));
1814 acc->connectid.connection_id =
1815 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1816 be16_to_cpu(rqst->connect_cmd.qid)));
1817}
1818
1819
1820
1821
1822
1823static int
1824nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1825 struct nvmet_fc_ls_iod *iod)
1826{
1827 struct fcnvme_ls_disconnect_assoc_rqst *rqst =
1828 &iod->rqstbuf->rq_dis_assoc;
1829 struct fcnvme_ls_disconnect_assoc_acc *acc =
1830 &iod->rspbuf->rsp_dis_assoc;
1831 struct nvmet_fc_tgt_assoc *assoc = NULL;
1832 struct nvmet_fc_ls_iod *oldls = NULL;
1833 unsigned long flags;
1834 int ret = 0;
1835
1836 memset(acc, 0, sizeof(*acc));
1837
1838 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
1839 if (!ret) {
1840
1841 assoc = nvmet_fc_find_target_assoc(tgtport,
1842 be64_to_cpu(rqst->associd.association_id));
1843 iod->assoc = assoc;
1844 if (!assoc)
1845 ret = VERR_NO_ASSOC;
1846 }
1847
1848 if (ret || !assoc) {
1849 dev_err(tgtport->dev,
1850 "Disconnect LS failed: %s\n",
1851 validation_errors[ret]);
1852 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1853 sizeof(*acc), rqst->w0.ls_cmd,
1854 (ret == VERR_NO_ASSOC) ?
1855 FCNVME_RJT_RC_INV_ASSOC :
1856 FCNVME_RJT_RC_LOGIC,
1857 FCNVME_RJT_EXP_NONE, 0);
1858 return true;
1859 }
1860
1861
1862
1863 iod->lsrsp->rsplen = sizeof(*acc);
1864
1865 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1866 fcnvme_lsdesc_len(
1867 sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
1868 FCNVME_LS_DISCONNECT_ASSOC);
1869
1870
1871 nvmet_fc_tgt_a_put(assoc);
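
        /*
         * Only one Disconnect Association LS response can be outstanding
         * per association: stash this iod as assoc->rcv_disconn (its ACC
         * is sent when the association is freed) and, if an earlier
         * disconnect LS was already pending, reject that older one now.
         */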
1882 spin_lock_irqsave(&tgtport->lock, flags);
1883 oldls = assoc->rcv_disconn;
1884 assoc->rcv_disconn = iod;
1885 spin_unlock_irqrestore(&tgtport->lock, flags);
1886
1887 nvmet_fc_delete_target_assoc(assoc);
1888
1889 if (oldls) {
1890 dev_info(tgtport->dev,
1891 "{%d:%d} Multiple Disconnect Association LS's "
1892 "received\n",
1893 tgtport->fc_target_port.port_num, assoc->a_id);
1894
1895 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
1896 sizeof(*iod->rspbuf),
1897
1898 rqst->w0.ls_cmd,
1899 FCNVME_RJT_RC_UNAB,
1900 FCNVME_RJT_EXP_NONE, 0);
1901 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1902 }
1903
1904 return false;
1905}
1906
1907
1908
1909
1910
1911static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1912
1913static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1914
1915static void
1916nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
1917{
1918 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
1919 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1920
1921 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1922 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1923 nvmet_fc_free_ls_iod(tgtport, iod);
1924 nvmet_fc_tgtport_put(tgtport);
1925}
1926
1927static void
1928nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1929 struct nvmet_fc_ls_iod *iod)
1930{
1931 int ret;
1932
1933 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1934 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1935
1936 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
1937 if (ret)
1938 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
1939}
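
/* Actual processing routine for received FC-NVME LS requests from the LLDD */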
1944static void
1945nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1946 struct nvmet_fc_ls_iod *iod)
1947{
1948 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
1949 bool sendrsp = true;
1950
1951 iod->lsrsp->nvme_fc_private = iod;
1952 iod->lsrsp->rspbuf = iod->rspbuf;
1953 iod->lsrsp->rspdma = iod->rspdma;
1954 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
1955
1956 iod->lsrsp->rsplen = 0;
1957
1958 iod->assoc = NULL;
1959
1960
1961
1962
1963
1964
1965 switch (w0->ls_cmd) {
1966 case FCNVME_LS_CREATE_ASSOCIATION:
1967
1968 nvmet_fc_ls_create_association(tgtport, iod);
1969 break;
1970 case FCNVME_LS_CREATE_CONNECTION:
1971
1972 nvmet_fc_ls_create_connection(tgtport, iod);
1973 break;
1974 case FCNVME_LS_DISCONNECT_ASSOC:
1975
1976 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
1977 break;
1978 default:
1979 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
1980 sizeof(*iod->rspbuf), w0->ls_cmd,
1981 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1982 }
1983
1984 if (sendrsp)
1985 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1986}
1987
1988
1989
1990
1991static void
1992nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1993{
1994 struct nvmet_fc_ls_iod *iod =
1995 container_of(work, struct nvmet_fc_ls_iod, work);
1996 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1997
1998 nvmet_fc_handle_ls_rqst(tgtport, iod);
1999}
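
/*
 * nvmet_fc_rcv_ls_req - called by an LLDD upon receipt of an FC-NVME LS
 * request from a host.  Copies the LS payload into a pre-allocated LS
 * context and queues nvmet_fc_handle_ls_rqst_work() to process it; the
 * lsrsp structure is used later to transmit the response.  Returns -E2BIG
 * for oversized payloads, -ESHUTDOWN if the target port is being removed,
 * and -ENOENT if no LS context is available.
 */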
2020int
2021nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
2022 void *hosthandle,
2023 struct nvmefc_ls_rsp *lsrsp,
2024 void *lsreqbuf, u32 lsreqbuf_len)
2025{
2026 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2027 struct nvmet_fc_ls_iod *iod;
2028 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
2029
2030 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
2031 dev_info(tgtport->dev,
2032 "RCV %s LS failed: payload too large (%d)\n",
2033 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2034 nvmefc_ls_names[w0->ls_cmd] : "",
2035 lsreqbuf_len);
2036 return -E2BIG;
2037 }
2038
2039 if (!nvmet_fc_tgtport_get(tgtport)) {
2040 dev_info(tgtport->dev,
2041 "RCV %s LS failed: target deleting\n",
2042 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2043 nvmefc_ls_names[w0->ls_cmd] : "");
2044 return -ESHUTDOWN;
2045 }
2046
2047 iod = nvmet_fc_alloc_ls_iod(tgtport);
2048 if (!iod) {
2049 dev_info(tgtport->dev,
2050 "RCV %s LS failed: context allocation failed\n",
2051 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2052 nvmefc_ls_names[w0->ls_cmd] : "");
2053 nvmet_fc_tgtport_put(tgtport);
2054 return -ENOENT;
2055 }
2056
2057 iod->lsrsp = lsrsp;
2058 iod->fcpreq = NULL;
2059 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
2060 iod->rqstdatalen = lsreqbuf_len;
2061 iod->hosthandle = hosthandle;
2062
2063 schedule_work(&iod->work);
2064
2065 return 0;
2066}
2067EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
2068
2069
2070
2071
2072
2073
2074
2075
2076static int
2077nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2078{
2079 struct scatterlist *sg;
2080 unsigned int nent;
2081
2082 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2083 if (!sg)
2084 goto out;
2085
2086 fod->data_sg = sg;
2087 fod->data_sg_cnt = nent;
2088 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2089 ((fod->io_dir == NVMET_FCP_WRITE) ?
2090 DMA_FROM_DEVICE : DMA_TO_DEVICE));
2091
2092 fod->next_sg = fod->data_sg;
2093
2094 return 0;
2095
2096out:
2097 return NVME_SC_INTERNAL;
2098}
2099
2100static void
2101nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2102{
2103 if (!fod->data_sg || !fod->data_sg_cnt)
2104 return;
2105
2106 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2107 ((fod->io_dir == NVMET_FCP_WRITE) ?
2108 DMA_FROM_DEVICE : DMA_TO_DEVICE));
2109 sgl_free(fod->data_sg);
2110 fod->data_sg = NULL;
2111 fod->data_sg_cnt = 0;
2112}
2113
2114
2115static bool
2116queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
2117{
2118 u32 sqtail, used;
2119
2120
2121 sqtail = atomic_read(&q->sqtail) % q->sqsize;
2122
2123 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
2124 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
2125}
2126
2127
2128
2129
2130
2131static void
2132nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2133 struct nvmet_fc_fcp_iod *fod)
2134{
2135 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
2136 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2137 struct nvme_completion *cqe = &ersp->cqe;
2138 u32 *cqewd = (u32 *)cqe;
2139 bool send_ersp = false;
2140 u32 rsn, rspcnt, xfr_length;
2141
2142 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
2143 xfr_length = fod->req.transfer_len;
2144 else
2145 xfr_length = fod->offset;
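
        /*
         * A full ERSP (rather than the abbreviated all-zeros response) is
         * sent every ersp_ratio responses, and always for fabrics
         * commands, short/over transfers, non-good status, non-zero CQE
         * result words, fused commands, or when the SQ is ~90% full so
         * the host sees an updated sq_head.
         */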
2166 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
2167 if (!(rspcnt % fod->queue->ersp_ratio) ||
2168 nvme_is_fabrics((struct nvme_command *) sqe) ||
2169 xfr_length != fod->req.transfer_len ||
2170 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
2171 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
2172 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2173 send_ersp = true;
2174
2175
2176 fod->fcpreq->rspaddr = ersp;
2177 fod->fcpreq->rspdma = fod->rspdma;
2178
2179 if (!send_ersp) {
2180 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
2181 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
2182 } else {
2183 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
2184 rsn = atomic_inc_return(&fod->queue->rsn);
2185 ersp->rsn = cpu_to_be32(rsn);
2186 ersp->xfrd_len = cpu_to_be32(xfr_length);
2187 fod->fcpreq->rsplen = sizeof(*ersp);
2188 }
2189
2190 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
2191 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
2192}
2193
2194static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
2195
2196static void
2197nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
2198 struct nvmet_fc_fcp_iod *fod)
2199{
2200 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2201
2202
2203 nvmet_fc_free_tgt_pgs(fod);
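
        /*
         * If the abort was already surfaced from the LLDD (fod->aborted),
         * don't issue another fcp_abort; just release the fcp_iod back to
         * the queue's free list.
         */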
2210 if (!fod->aborted)
2211 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
2212
2213 nvmet_fc_free_fcp_iod(fod->queue, fod);
2214}
2215
2216static void
2217nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2218 struct nvmet_fc_fcp_iod *fod)
2219{
2220 int ret;
2221
2222 fod->fcpreq->op = NVMET_FCOP_RSP;
2223 fod->fcpreq->timeout = 0;
2224
2225 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2226
2227 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2228 if (ret)
2229 nvmet_fc_abort_op(tgtport, fod);
2230}
2231
2232static void
2233nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
2234 struct nvmet_fc_fcp_iod *fod, u8 op)
2235{
2236 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2237 struct scatterlist *sg = fod->next_sg;
2238 unsigned long flags;
2239 u32 remaininglen = fod->req.transfer_len - fod->offset;
2240 u32 tlen = 0;
2241 int ret;
2242
2243 fcpreq->op = op;
2244 fcpreq->offset = fod->offset;
2245 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
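
        /*
         * Build the sg list for this transfer leg: walk from fod->next_sg,
         * capping the leg at the LLDD's max_sg_cnt segments and at
         * NVMET_FC_MAX_SEQ_LENGTH bytes, and remember where the next leg
         * should resume.
         */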
2256 fcpreq->sg = sg;
2257 fcpreq->sg_cnt = 0;
2258 while (tlen < remaininglen &&
2259 fcpreq->sg_cnt < tgtport->max_sg_cnt &&
2260 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
2261 fcpreq->sg_cnt++;
2262 tlen += sg_dma_len(sg);
2263 sg = sg_next(sg);
2264 }
2265 if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
2266 fcpreq->sg_cnt++;
2267 tlen += min_t(u32, sg_dma_len(sg), remaininglen);
2268 sg = sg_next(sg);
2269 }
2270 if (tlen < remaininglen)
2271 fod->next_sg = sg;
2272 else
2273 fod->next_sg = NULL;
2274
2275 fcpreq->transfer_length = tlen;
2276 fcpreq->transferred_length = 0;
2277 fcpreq->fcp_error = 0;
2278 fcpreq->rsplen = 0;
2279
2280
2281
2282
2283
2284 if ((op == NVMET_FCOP_READDATA) &&
2285 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
2286 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
2287 fcpreq->op = NVMET_FCOP_READDATA_RSP;
2288 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2289 }
2290
2291 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2292 if (ret) {
2293
2294
2295
2296
2297
2298 fod->abort = true;
2299
2300 if (op == NVMET_FCOP_WRITEDATA) {
2301 spin_lock_irqsave(&fod->flock, flags);
2302 fod->writedataactive = false;
2303 spin_unlock_irqrestore(&fod->flock, flags);
2304 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2305 } else {
2306 fcpreq->fcp_error = ret;
2307 fcpreq->transferred_length = 0;
2308 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
2309 }
2310 }
2311}
2312
2313static inline bool
2314__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
2315{
2316 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2317 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2318
2319
2320 if (abort) {
2321 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
2322 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2323 return true;
2324 }
2325
2326 nvmet_fc_abort_op(tgtport, fod);
2327 return true;
2328 }
2329
2330 return false;
2331}
2332
2333
2334
2335
static void
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	fod->writedataactive = false;
	spin_unlock_irqrestore(&fod->flock, flags);

	switch (fcpreq->op) {

	case NVMET_FCOP_WRITEDATA:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->abort = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
			return;
		}

		/* data transfer complete, resume with nvmet layer */
		fod->req.execute(&fod->req);
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			nvmet_fc_abort_op(tgtport, fod);
			return;
		}

		/* success */

		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);
			nvmet_fc_free_fcp_iod(fod->queue, fod);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* data transfer complete, send response */

		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_xmt_fcp_rsp(tgtport, fod);

		break;

	case NVMET_FCOP_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		nvmet_fc_free_fcp_iod(fod->queue, fod);
		break;

	default:
		break;
	}
}

static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	nvmet_fc_fod_op_done(fod);
}

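/*
 * actual completion handler after execution by the nvmet layer
 */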
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if we have a CQE, snoop the last sq_head value */
	if (!status)
		fod->queue->sqhd = cqe->sq_head;

	if (abort) {
		nvmet_fc_abort_op(tgtport, fod);
		return;
	}

	/* if an error handling the cmd post initial parsing */
	if (status) {
		/* fudge up a failed CQE status for our transport error */
		memset(cqe, 0, sizeof(*cqe));
		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
		cqe->sq_id = cpu_to_le16(fod->queue->qid);
		cqe->command_id = sqe->command_id;
		cqe->status = cpu_to_le16(status);
	} else {

		/*
		 * try to push the data even if the SQE status is non-zero.
		 * There may be a status where data still was intended to
		 * be moved
		 */
		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
			/* push the data over before sending rsp */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* writes & no data - fall thru */
	}

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}

static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
{
	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
}

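/*
 * Actual processing routine for received FC-NVME I/O Requests from the LLD
 */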
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
	int ret;

	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the implementation of the FC transport does not
	 * look at the fused commands and order delivery to the upper
	 * layer until we have both matching commands.
	 */

	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;

	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
		fod->io_dir = NVMET_FCP_WRITE;
		if (!nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
		fod->io_dir = NVMET_FCP_READ;
		if (nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else {
		fod->io_dir = NVMET_FCP_NODATA;
		if (xfrlen)
			goto transport_error;
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.cqe = &fod->rspiubuf.cqe;
	if (tgtport->pe)
		fod->req.port = tgtport->pe->port;

	/* clear any response payload */
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;

	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
	if (!ret) {
		/* bad SQE content or invalid ctrl state */
		/* nvmet layer has already called op done to send rsp. */
		return;
	}

	fod->req.transfer_len = xfrlen;

	/* keep a running counter of tail position */
	atomic_inc(&fod->queue->sqtail);

	if (fod->req.transfer_len) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
	}
	fod->req.sg = fod->data_sg;
	fod->req.sg_cnt = fod->data_sg_cnt;
	fod->offset = 0;

	if (fod->io_dir == NVMET_FCP_WRITE) {
		/* pull the data over before invoking nvmet layer */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
		return;
	}

	/*
	 * Reads or no data:
	 *
	 * can invoke the nvmet_layer now. If read data, cmd completion will
	 * push the data
	 */
	fod->req.execute(&fod->req);
	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod);
}

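/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                        upon the reception of a FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * cmd iu buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * If no job structure is currently available and the LLDD supports
 * deferred receives (ops->defer_rcv), the request is placed on the
 * queue's pending list and -EOVERFLOW is returned. In that case the
 * CMD IU buffer is referenced (via fcpreq->rspaddr/rsplen) until the
 * deferred command is picked up, so the LLDD must not free or reuse
 * the buffer until its defer_rcv handler has been called.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *              was received on.
 * @fcpreq:     pointer to a fcpreq request structure to be used to reference
 *              the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */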
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
	    (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
	    (cmdiu->fc_id != NVME_CMD_FC_ID) ||
	    (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
			be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	spin_lock_irqsave(&queue->qlock, flags);

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (fod) {
		spin_unlock_irqrestore(&queue->qlock, flags);

		fcpreq->nvmet_fc_private = fod;
		fod->fcpreq = fcpreq;

		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

		return 0;
	}

	if (!tgtport->ops->defer_rcv) {
		spin_unlock_irqrestore(&queue->qlock, flags);
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
			struct nvmet_fc_defer_fcp_req, req_list);
	if (deferfcp) {
		/* Just re-use one that was previously allocated */
		list_del(&deferfcp->req_list);
	} else {
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
		if (!deferfcp) {
			/* release the queue lookup reference */
			nvmet_fc_tgt_q_put(queue);
			return -ENOMEM;
		}
		spin_lock_irqsave(&queue->qlock, flags);
	}

	/* For now, use rspaddr / rsplen to save payload information */
	fcpreq->rspaddr = cmdiubuf;
	fcpreq->rsplen  = cmdiubuf_len;
	deferfcp->fcp_req = fcpreq;

	/* defer processing till a fod becomes available */
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);

	/* NOTE: the queue lookup reference is still valid */

	spin_unlock_irqrestore(&queue->qlock, flags);

	return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);

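/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                          upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * transport marks the io as aborted; any outstanding data-transfer or
 * nvmet-layer completion will observe the aborted state, tear down the
 * exchange, and return the job structure to the LLDD.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *              was received on.
 * @fcpreq:     pointer to the fcpreq request structure that corresponds
 *              to the exchange that received the ABTS.
 */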
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;

	if (!fod || fod->fcpreq != fcpreq)
		/* job appears to have already completed, ignore abort */
		return;

	queue = fod->queue;

	spin_lock_irqsave(&queue->qlock, flags);
	if (fod->active) {
		/*
		 * mark as abort. The abort handler, invoked upon completion
		 * of any work, will detect the aborted status and do the
		 * callback.
		 */
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

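/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */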
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
	    !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
	    !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
			"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
		!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
		!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
			"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}

static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_port_entry *pe;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */

	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
	if (ret)
		return ret;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return -ENOMEM;

	ret = -ENXIO;
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			/* a FC port can only be 1 nvmet port id */
			if (!tgtport->pe) {
				nvmet_fc_portentry_bind(tgtport, pe, port);
				ret = 0;
			} else
				ret = -EALREADY;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	if (ret)
		kfree(pe);

	return ret;
}

static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	struct nvmet_fc_port_entry *pe = port->priv;

	nvmet_fc_portentry_unbind(pe);

	kfree(pe);
}

static void
nvmet_fc_discovery_chg(struct nvmet_port *port)
{
	struct nvmet_fc_port_entry *pe = port->priv;
	struct nvmet_fc_tgtport *tgtport = pe->tgtport;

	if (tgtport && tgtport->ops->discovery_event)
		tgtport->ops->discovery_event(&tgtport->fc_target_port);
}

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_FC,
	.msdbd			= 1,
	.add_port		= nvmet_fc_add_port,
	.remove_port		= nvmet_fc_remove_port,
	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
	.delete_ctrl		= nvmet_fc_delete_ctrl,
	.discovery_chg		= nvmet_fc_discovery_chg,
};

static int __init nvmet_fc_init_module(void)
{
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}

static void __exit nvmet_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_LICENSE("GPL v2");