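/*
 * NVMe over Fabrics FC target (nvmet-fc) transport: glue between FC
 * low-level device drivers (LLDDs) and the generic nvmet layer - LS
 * request/response handling, association/queue/port management, and
 * FCP data transfer.
 */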
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "../host/fc.h"
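
/* *************************** Data Structures ************************** */
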
#define NVMET_LS_CTX_COUNT		256

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {		/* for an LS RQST RCV */
	struct nvmefc_ls_rsp		*lsrsp;
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct list_head		ls_rcv_list; /* tgtport->ls_rcv_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;
	void				*hosthandle;

	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_ls_req_op {		/* for an LS RQST XMT */
	struct nvmefc_ls_req		ls_req;

	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;

	int				ls_error;
	struct list_head		lsreq_list; /* tgtport->ls_req_list */
	bool				req_queued;
};
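
/* desired maximum for a single sequence - if sg list allows it */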
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;
};

struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list;
	struct device			*dev;
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_rcv_list;
	struct list_head		ls_req_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct list_head		host_list;
	struct ida			assoc_cnt;
	struct nvmet_fc_port_entry	*pe;
	struct kref			ref;
	u32				max_sg_cnt;
};

struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_port		*port;
	u64				node_name;
	u64				port_name;
	struct list_head		pe_list;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
	struct nvmet_fc_fcp_iod		fod[];
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_hostport {
	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;
	struct list_head		host_list;
	struct kref			ref;
	u8				invalid;
};

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	atomic_t			terminating;
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_hostport	*hostport;
	struct nvmet_fc_ls_iod		*rcv_disconn;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}
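
/*
 * Association and Connection IDs:
 *
 * An association id is a 64-bit value with a random number in the
 * upper 6 bytes and zeros in the low-order 2 bytes. A connection id
 * is the association id with the queue id (qid) placed in those low
 * 2 bytes; masking the qid off recovers the association id.
 */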
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
				 fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}
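
/* *************************** Globals **************************** */
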
static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod);
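
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrapper all the dma routines and check the dev pointer.
 *
 * If simple mappings (return just a dma address, we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */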
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
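
/* *********************** FC-NVME LS XMT Handling ************************* */
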
static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
{
	struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&tgtport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvmet_fc_tgtport_put(tgtport);
}

static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (!tgtport->ops->ls_req)
		return -EOPNOTSUPP;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);

	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_puttgtport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&tgtport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
				   lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&tgtport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_puttgtport:
	nvmet_fc_tgtport_put(tgtport);

	return ret;
}

static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvmet_fc_send_ls_req(tgtport, lsop, done);
}

static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmet_fc_ls_req_op *lsop =
		container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);

	__nvmet_fc_finish_ls_req(lsop);

	/* fc-nvme target doesn't care about success or failure of cmd */

	kfree(lsop);
}
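
/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association, which also terminates all of its
 * connections. It is sent when the target tears down an association
 * on its own (e.g. controller delete or targetport unregister) so
 * that the host is informed. Transmission is best-effort: if the LS
 * cannot be sent, teardown continues regardless.
 */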
static void
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmet_fc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	/*
	 * If ls_req is NULL, the LLDD doesn't support target-originated
	 * LS requests - there's nothing to send. Also skip the send if
	 * the hostport is gone or the LLDD has already invalidated it.
	 */
	if (!tgtport->ops->ls_req || !assoc->hostport ||
	    assoc->hostport->invalid)
		return;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(tgtport->dev,
			"{%d:%d} send Disconnect Association failed: ENOMEM\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (tgtport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	lsop->tgtport = tgtport;
	lsop->hosthandle = assoc->hostport->hosthandle;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				assoc->association_id);

	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
				nvmet_fc_disconnect_assoc_done);
	if (ret) {
		dev_info(tgtport->dev,
			"{%d:%d} XMT Disconnect Association failed: %d\n",
			tgtport->fc_target_port.port_num, assoc->a_id, ret);
		kfree(lsop);
	}
}
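
/* *********************** FC-NVME Port Management ************************ */
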
static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);

		iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
				       sizeof(union nvmefc_ls_responses),
				       GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						sizeof(*iod->rspbuf),
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_rcv_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}

	kfree(tgtport->iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, sizeof(*iod->rspbuf),
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
					struct nvmet_fc_ls_iod, ls_rcv_list);
	if (iod)
		list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}

static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The fod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}

static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}

static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}

static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}

static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	/* if not connected, nothing to do */
	if (!disconnect)
		return;

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (fod->writedataactive) {
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			} else
				spin_unlock(&fod->flock);
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

static void
nvmet_fc_hostport_free(struct kref *ref)
{
	struct nvmet_fc_hostport *hostport =
		container_of(ref, struct nvmet_fc_hostport, ref);
	struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&hostport->host_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	if (tgtport->ops->host_release && hostport->invalid)
		tgtport->ops->host_release(hostport->hosthandle);
	kfree(hostport);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
{
	kref_put(&hostport->ref, nvmet_fc_hostport_free);
}

static int
nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
{
	return kref_get_unless_zero(&hostport->ref);
}

static void
nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
{
	/* if LLDD not implemented, leave as NULL */
	if (!hostport || !hostport->hosthandle)
		return;

	nvmet_fc_hostport_put(hostport);
}

static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *newhost, *host, *match = NULL;
	unsigned long flags;

	/* if LLDD not implemented, leave as NULL */
	if (!hosthandle)
		return NULL;

	/* take reference for what will be the newly allocated hostport */
	if (!nvmet_fc_tgtport_get(tgtport))
		return ERR_PTR(-EINVAL);

	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
	if (!newhost) {
		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(host, &tgtport->host_list, host_list) {
			if (host->hosthandle == hosthandle && !host->invalid) {
				if (nvmet_fc_hostport_get(host)) {
					match = host;
					break;
				}
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
		/* no new allocation - release reference */
		nvmet_fc_tgtport_put(tgtport);
		return (match) ? match : ERR_PTR(-ENOMEM);
	}

	newhost->tgtport = tgtport;
	newhost->hosthandle = hosthandle;
	INIT_LIST_HEAD(&newhost->host_list);
	kref_init(&newhost->ref);

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(host, &tgtport->host_list, host_list) {
		if (host->hosthandle == hosthandle && !host->invalid) {
			if (nvmet_fc_hostport_get(host)) {
				match = host;
				break;
			}
		}
	}
	if (match) {
		kfree(newhost);
		newhost = NULL;
		/* releasing allocation - release reference */
		nvmet_fc_tgtport_put(tgtport);
	} else
		list_add_tail(&newhost->host_list, &tgtport->host_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return (match) ? match : newhost;
}

static void
nvmet_fc_delete_assoc(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);

	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida;

	assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
	if (IS_ERR(assoc->hostport))
		goto out_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
	atomic_set(&assoc->terminating, 0);

	/*
	 * pick a random association id, but ensure it is unique on the
	 * targetport. The qid bytes (low 2 bytes) are left as zero.
	 */
	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_put:
	nvmet_fc_tgtport_put(tgtport);
out_ida:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_ls_iod *oldls;
	unsigned long flags;

	/* Send Disconnect now that all i/o has completed */
	nvmet_fc_xmt_disconnect_assoc(assoc);

	nvmet_fc_free_hostport(assoc->hostport);
	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	oldls = assoc->rcv_disconn;
	spin_unlock_irqrestore(&tgtport->lock, flags);
	/* if pending Rcv Disconnect Association LS, send rsp now */
	if (oldls)
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	dev_info(tgtport->dev,
		"{%d:%d} Association freed\n",
		tgtport->fc_target_port.port_num, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i, terminating;

	terminating = atomic_xchg(&assoc->terminating, 1);

	/* if already terminating, do nothing */
	if (terminating)
		return;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	dev_info(tgtport->dev,
		"{%d:%d} Association deleted\n",
		tgtport->fc_target_port.port_num, assoc->a_id);

	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			if (!nvmet_fc_tgt_a_get(assoc))
				ret = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}

static void
nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_port_entry *pe,
			struct nvmet_port *port)
{
	lockdep_assert_held(&nvmet_fc_tgtlock);

	pe->tgtport = tgtport;
	tgtport->pe = pe;

	pe->port = port;
	port->priv = pe;

	pe->node_name = tgtport->fc_target_port.node_name;
	pe->port_name = tgtport->fc_target_port.port_name;
	INIT_LIST_HEAD(&pe->pe_list);

	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
}

static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
{
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (pe->tgtport)
		pe->tgtport->pe = NULL;
	list_del(&pe->pe_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
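
/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */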
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe)
		pe->tgtport = NULL;
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
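
/*
 * called when a targetport registers. Looks for an existing nvmet
 * port_entry matching the targetport's node_name/port_name (i.e. the
 * lldd deregistered and is now re-registering) and, if found, rebinds
 * the nvmet port to the new targetport.
 */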
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
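
/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. May be
 *             NULL (e.g. the fcloop test transport).
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */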
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	if (template->target_priv_sz)
		newrec->fc_target_port.private = &newrec[1];
	else
		newrec->fc_target_port.private = NULL;
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	INIT_LIST_HEAD(&newrec->host_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = template->max_sgl_segments;

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	nvmet_fc_portentry_rebind_tgt(newrec);

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);

static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		if (!schedule_work(&assoc->del_work))
			/* already deleting - release local reference */
			nvmet_fc_tgt_a_put(assoc);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}
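
/**
 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
 *                       to remove references to a hosthandle for LS's.
 *
 * The nvmet-fc layer ensures that any references to the hosthandle
 * are forgotten. Any associations tied to the hosthandle are marked
 * invalid and scheduled for deletion. If no associations reference
 * the hosthandle and the LLDD implements host_release, the handle is
 * released back to the LLDD immediately.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @hosthandle:  the handle (pointer) that represents the host port
 *               that no longer has connectivity and that LS's should
 *               no longer be directed to.
 */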
void
nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
			void *hosthandle)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;
	bool noassoc = true;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!assoc->hostport ||
		    assoc->hostport->hosthandle != hosthandle)
			continue;
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		assoc->hostport->invalid = 1;
		noassoc = false;
		if (!schedule_work(&assoc->del_work))
			/* already deleting - release local reference */
			nvmet_fc_tgt_a_put(assoc);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	/* if there's nothing to wait for - call the callback */
	if (noassoc && tgtport->ops->host_release)
		tgtport->ops->host_release(hosthandle);
}
EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
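
/*
 * nvmet layer has called to terminate an association
 */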
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			if (!schedule_work(&assoc->del_work))
				/* already deleting - release local reference */
				nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
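
/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                      LLDD to deregister/remove a previously
 *                      registered a local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */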
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	nvmet_fc_portentry_unbind_tgt(tgtport);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	/*
	 * should terminate LS's as well. However, LS's will be generated
	 * at the tail end of association termination, so they likely don't
	 * exist yet. And even if they did, it's worthwhile to just let
	 * them finish and targetport ref counting will clean things up.
	 */

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
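
/* ********************** FC-NVME LS RCV Handling ************************* */
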
static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
	struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths as padding sizes for Create Association Cmd descriptor
	 * was incorrect.
	 * Accept anything of "minimum" length. Assume format per 1.15
	 * spec (with HOSTID reduced to 16 bytes), ignore how long the
	 * trailing pad length is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(
						tgtport, iod->hosthandle);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	dev_info(tgtport->dev,
		"{%d:%d} Association created\n",
		tgtport->fc_target_port.port_num, iod->assoc->a_id);

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
	struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}
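
/*
 * Returns true if the LS response is to be transmit
 * Returns false if the LS response is to be delayed
 */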
static int
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
			&iod->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
			&iod->rspbuf->rsp_dis_assoc;
	struct nvmet_fc_tgt_assoc *assoc = NULL;
	struct nvmet_fc_ls_iod *oldls = NULL;
	unsigned long flags;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association - takes an assoc ref if !NULL */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (!assoc)
			ret = VERR_NO_ASSOC;
	}

	if (ret || !assoc) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(assoc);

	/*
	 * The rules for LS response say the response cannot
	 * go back until ABTS's have been sent for all outstanding
	 * I/O and a Disconnect Association LS has been sent.
	 * So... save off the Disconnect LS to send the response
	 * later. If there was a prior Disconnect LS saved, replace
	 * it with the newer one and reject the older one as unable
	 * to be performed.
	 */
	spin_lock_irqsave(&tgtport->lock, flags);
	oldls = assoc->rcv_disconn;
	assoc->rcv_disconn = iod;
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_delete_target_assoc(assoc);

	if (oldls) {
		dev_info(tgtport->dev,
			"{%d:%d} Multiple Disconnect Association LS's "
			"received\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*iod->rspbuf),
						/* ls cmd should be same as received */
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	}

	return false;
}
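
/* *********************** NVME Ctrl Routines **************************** */
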
static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				  sizeof(*iod->rspbuf), DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
}
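
/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */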
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
	bool sendrsp = true;

	iod->lsrsp->nvme_fc_private = iod;
	iod->lsrsp->rspbuf = iod->rspbuf;
	iod->lsrsp->rspdma = iod->rspdma;
	iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	iod->lsrsp->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT_ASSOC:
		/* Terminate a Queue/Connection or the Association */
		sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
				sizeof(*iod->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	if (sendrsp)
		nvmet_fc_xmt_ls_rsp(tgtport, iod);
}

/*
 * Actual work handler routine for processing an LS when a work
 * element is scheduled.
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}
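
/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a FC-NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @hosthandle:  pointer to the host specific data, used to reference
 *               the remote port that sent the LS and, if needed, to
 *               originate an LS back to the host later.
 * @lsrsp:       pointer to a lsrsp structure to be used to reference
 *               the exchange corresponding to the LS.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */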
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			void *hosthandle,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;
	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;

	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
		dev_info(tgtport->dev,
			"RCV %s LS failed: payload too large (%d)\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "",
			lsreqbuf_len);
		return -E2BIG;
	}

	if (!nvmet_fc_tgtport_get(tgtport)) {
		dev_info(tgtport->dev,
			"RCV %s LS failed: target deleting\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		return -ESHUTDOWN;
	}

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		dev_info(tgtport->dev,
			"RCV %s LS failed: context allocation failed\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsrsp = lsrsp;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;
	iod->hosthandle = hosthandle;

	schedule_work(&iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
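
/*
 * **********************
 * Start of FCP handling
 * **********************
 */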
static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	unsigned int nent;

	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
	if (!sg)
		goto out;

	fod->data_sg = sg;
	fod->data_sg_cnt = nent;
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
				/* note: write from initiator perspective */
	fod->next_sg = fod->data_sg;

	return 0;

out:
	return NVME_SC_INTERNAL;
}

static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	if (!fod->data_sg || !fod->data_sg_cnt)
		return;

	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
	sgl_free(fod->data_sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
}

static bool
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
{
	u32 sqtail, used;

	/* sqtail is just a best guess of the current tail */
	sqtail = atomic_read(&q->sqtail) % q->sqsize;

	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
}
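
/*
 * Prep RSP payload.
 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
 */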
static void
nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &ersp->cqe;
	u32 *cqewd = (u32 *)cqe;
	bool send_ersp = false;
	u32 rsn, rspcnt, xfr_length;

	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
		xfr_length = fod->req.transfer_len;
	else
		xfr_length = fod->offset;

	/*
	 * check to see if we can send a 0's rsp.
	 *   Note: to send a 0's response, the NVME-FC host transport will
	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
	 *   seen in an ersp), and command_id. Thus it will create a
	 *   zero-filled CQE with those known fields filled in. Transport
	 *   must send an ersp for any condition where the cqe won't match
	 *   this.
	 *
	 * Here are the FC-NVME mandated cases where we must send an ersp:
	 *  every N responses, where N=ersp_ratio
	 *  force fabric commands to send ersp's (not in FC-NVME but good
	 *    practice)
	 *  normal cmds: any time status is non-zero, or status is zero
	 *     but words 0 or 1 are non-zero.
	 *  the SQ is 90% or more full
	 *  the cmd is a fused command
	 *  transferred data length not equal to cmd iu length
	 */
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    nvme_is_fabrics((struct nvme_command *) sqe) ||
	    xfr_length != fod->req.transfer_len ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
		send_ersp = true;

	/* re-set the fields */
	fod->fcpreq->rspaddr = ersp;
	fod->fcpreq->rspdma = fod->rspdma;

	if (!send_ersp) {
		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
	} else {
		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
		rsn = atomic_inc_return(&fod->queue->rsn);
		ersp->rsn = cpu_to_be32(rsn);
		ersp->xfrd_len = cpu_to_be32(xfr_length);
		fod->fcpreq->rsplen = sizeof(*ersp);
	}

	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}

static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);

static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	/*
	 * if an ABTS was received or we issued the fcp_abort early
	 * don't call abort routine again.
	 */
	/* no need to take lock - lock was taken earlier to get here */
	if (!fod->aborted)
		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);

	nvmet_fc_free_fcp_iod(fod->queue, fod);
}

static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	int ret;

	fod->fcpreq->op = NVMET_FCOP_RSP;
	fod->fcpreq->timeout = 0;

	nvmet_fc_prep_fcp_rsp(tgtport, fod);

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret)
		nvmet_fc_abort_op(tgtport, fod);
}

static void
nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct scatterlist *sg = fod->next_sg;
	unsigned long flags;
	u32 remaininglen = fod->req.transfer_len - fod->offset;
	u32 tlen = 0;
	int ret;

	fcpreq->op = op;
	fcpreq->offset = fod->offset;
	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;

	/*
	 * for next sequence:
	 *  break at a sg element boundary
	 *  attempt to keep sequence length capped at
	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
	 *    be longer if a single sg element is larger
	 *    than that amount. This is done to avoid creating
	 *    corrupted sequences for io that transfers large
	 *    amounts of data.
	 */
	fcpreq->sg = sg;
	fcpreq->sg_cnt = 0;
	while (tlen < remaininglen &&
	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
		fcpreq->sg_cnt++;
		tlen += sg_dma_len(sg);
		sg = sg_next(sg);
	}
	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
		fcpreq->sg_cnt++;
		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
		sg = sg_next(sg);
	}
	if (tlen < remaininglen)
		fod->next_sg = sg;
	else
		fod->next_sg = NULL;

	fcpreq->transfer_length = tlen;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->rsplen = 0;

	/*
	 * If the last READDATA request: check if LLDD supports
	 * combined xfr with response.
	 */
	if ((op == NVMET_FCOP_READDATA) &&
	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
		fcpreq->op = NVMET_FCOP_READDATA_RSP;
		nvmet_fc_prep_fcp_rsp(tgtport, fod);
	}

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret) {
		/*
		 * should be ok to set w/o lock as its in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
		fod->abort = true;

		if (op == NVMET_FCOP_WRITEDATA) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = false;
			spin_unlock_irqrestore(&fod->flock, flags);
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
			fcpreq->fcp_error = ret;
			fcpreq->transferred_length = 0;
			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
		}
	}
}

static inline bool
__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	/* if in the middle of an io and we need to tear down */
	if (abort) {
		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return true;
		}

		nvmet_fc_abort_op(tgtport, fod);
		return true;
	}

	return false;
}
2321
/*
 * actual done handler for FCP operations when completed by the lldd
 */
static void
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	fod->writedataactive = false;
	spin_unlock_irqrestore(&fod->flock, flags);

	switch (fcpreq->op) {

	case NVMET_FCOP_WRITEDATA:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->abort = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
			return;
		}

		/* data transfer complete, resume with nvmet layer */
		fod->req.execute(&fod->req);
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			nvmet_fc_abort_op(tgtport, fod);
			return;
		}

		/* success */

		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);
			nvmet_fc_free_fcp_iod(fod->queue, fod);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* data transfer complete, send response */

		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_xmt_fcp_rsp(tgtport, fod);

		break;

	case NVMET_FCOP_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		nvmet_fc_free_fcp_iod(fod->queue, fod);
		break;

	default:
		break;
	}
}
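
/*
 * Informal summary of the per-I/O op sequencing driven by the handler
 * above (a reading aid, not an additional code path):
 *
 *   write:   WRITEDATA chunk(s) -> req.execute() -> RSP
 *   read:    req.execute() -> READDATA chunk(s) -> RSP
 *            (the last chunk becomes READDATA_RSP when the LLDD
 *             advertises NVMET_FCTGTFEAT_READDATA_RSP, skipping the
 *             separate RSP op)
 *   no data: req.execute() -> RSP
 */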

static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	nvmet_fc_fod_op_done(fod);
}

/*
 * actual completion handler after execution by the nvmet layer
 */
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if we have a CQE, snoop the last sq_head value */
	if (!status)
		fod->queue->sqhd = cqe->sq_head;

	if (abort) {
		nvmet_fc_abort_op(tgtport, fod);
		return;
	}

	/* if an error handling the cmd post initial parsing */
	if (status) {
		/* fudge up a failed CQE status for our transport error */
		memset(cqe, 0, sizeof(*cqe));
		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
		cqe->sq_id = cpu_to_le16(fod->queue->qid);
		cqe->command_id = sqe->command_id;
		cqe->status = cpu_to_le16(status);
	} else {

		/*
		 * try to push the data even if the SQE status is non-zero.
		 * There may be a status where data still was intended to
		 * be moved
		 */
		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
			/* push the data over before sending rsp */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* writes & no data - fall thru */
	}

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}

static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
{
	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
}

/*
 * Actual processing routine for received FC-NVME I/O Requests from the LLD
 */
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
	int ret;

	/*
	 * if there is no nvmet mapping to the targetport there
	 * shouldn't be requests. just terminate them.
	 */
	if (!tgtport->pe)
		goto transport_error;

	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the implementation of the FC transport does not
	 * look at the fused commands and order delivery to the upper
	 * layer until we have both matching commands.
	 */
	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;

	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
		fod->io_dir = NVMET_FCP_WRITE;
		if (!nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
		fod->io_dir = NVMET_FCP_READ;
		if (nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else {
		fod->io_dir = NVMET_FCP_NODATA;
		if (xfrlen)
			goto transport_error;
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.cqe = &fod->rspiubuf.cqe;
	fod->req.port = tgtport->pe->port;

	/* clear any response payload */
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;

	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
	if (!ret) {
		/* bad SQE content or invalid ctrl state */
		/* nvmet layer has already called op done to send rsp. */
		return;
	}

	fod->req.transfer_len = xfrlen;

	/* keep a running counter of tail position */
	atomic_inc(&fod->queue->sqtail);

	if (fod->req.transfer_len) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
	}
	fod->req.sg = fod->data_sg;
	fod->req.sg_cnt = fod->data_sg_cnt;
	fod->offset = 0;

	if (fod->io_dir == NVMET_FCP_WRITE) {
		/* pull the data over before invoking nvmet layer */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
		return;
	}

	/*
	 * Reads or no data:
	 *
	 * can invoke the nvmet_layer now. If read data, cmd completion will
	 * push the data
	 */
	fod->req.execute(&fod->req);
	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod);
}

/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                       upon the reception of a FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * cmd iu buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, in some circumstances, due to the packetized nature of FC
 * and the api of the FC LLDD which may issue a hw command to send the
 * response, but the LLDD may not get the hw completion for the command
 * and upcall the nvmet_fc layer before a new command may be
 * asynchronously received - its possible for a command to be received
 * before the LLDD and nvmet_fc have recycled the job structure. It gives
 * the appearance of more commands received than fits in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated,
 * the LLDD request and CMD iu buffer information remembered, and the
 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
 * structure is freed, it is immediately retrieved for processing. In
 * these cases, the LLDD must not free/reuse the CMD IU buffer until
 * the deferred processing has occurred.
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted
 * the request and the LLDD should terminate the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *              was received on.
 * @fcpreq:     pointer to a fcpreq request structure to be used to
 *              reference the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
			(cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
				be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	spin_lock_irqsave(&queue->qlock, flags);

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (fod) {
		spin_unlock_irqrestore(&queue->qlock, flags);

		fcpreq->nvmet_fc_private = fod;
		fod->fcpreq = fcpreq;

		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

		return 0;
	}

	if (!tgtport->ops->defer_rcv) {
		spin_unlock_irqrestore(&queue->qlock, flags);
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
			struct nvmet_fc_defer_fcp_req, req_list);
	if (deferfcp) {
		/* Just re-use one that was previously allocated */
		list_del(&deferfcp->req_list);
	} else {
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
		if (!deferfcp) {
			/* release the queue lookup reference */
			nvmet_fc_tgt_q_put(queue);
			return -ENOMEM;
		}
		spin_lock_irqsave(&queue->qlock, flags);
	}

	/* For now, use rspaddr / rsplen to save payload information */
	fcpreq->rspaddr = cmdiubuf;
	fcpreq->rsplen  = cmdiubuf_len;
	deferfcp->fcp_req = fcpreq;

	/* defer processing till a fod becomes available */
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);

	/* NOTE: the queue lookup reference is still valid */

	spin_unlock_irqrestore(&queue->qlock, flags);

	return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
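
/*
 * Sketch of a (hypothetical) LLDD receive path calling the entry point
 * above; lport_recv_fcp_cmd(), struct my_lport and my_terminate_exchange()
 * are invented for illustration and are not part of this API:
 *
 *	static void lport_recv_fcp_cmd(struct my_lport *lp,
 *				       struct nvmefc_tgt_fcp_req *fcpreq,
 *				       void *cmdiu, u32 cmdiu_len)
 *	{
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_fcp_req(lp->targetport, fcpreq,
 *					   cmdiu, cmdiu_len);
 *		if (ret == 0)
 *			return;	// transport owns the exchange; the cmdiu
 *				// buffer may be freed/reused immediately
 *		if (ret == -EOVERFLOW)
 *			return;	// deferred: keep the cmdiu buffer valid
 *				// until the deferred processing occurs
 *		my_terminate_exchange(lp, fcpreq);  // -EIO/-ENOTCONN/...
 *	}
 */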

/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                       upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * LLDD believes the command is still being worked on
 * (template_ops->fcp_req_release() has not been called).
 *
 * The transport will wait for any outstanding work (an op to the LLDD,
 * which the lldd should complete with error due to the ABTS; or the
 * completion from the nvmet layer of the nvme command), then will
 * stop processing and call the fcp_req_release() callback to return
 * the i/o context to the LLDD.
 *
 * If the command is not active in the transport (e.g. it has already
 * completed), the call is ignored.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *              was received on.
 * @fcpreq:     pointer to the fcpreq request structure that corresponds
 *              to the exchange that received the ABTS.
 */
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;

	if (!fod || fod->fcpreq != fcpreq)
		/* job appears to have already completed, ignore abort */
		return;

	queue = fod->queue;

	spin_lock_irqsave(&queue->qlock, flags);
	if (fod->active) {
		/*
		 * mark as abort. The abort handler, invoked upon completion
		 * of any work, will detect the aborted status and do the
		 * callback.
		 */
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
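
/*
 * Hypothetical LLDD ABTS path (a sketch under the same assumed names as
 * the earlier example): on receiving an ABTS for an exchange previously
 * passed to nvmet_fc_rcv_fcp_req(), the LLDD simply notifies the
 * transport and waits for the normal release callback:
 *
 *	nvmet_fc_rcv_fcp_abort(lp->targetport, fcpreq);
 *	// the transport quiesces the I/O; the LLDD still gets its usual
 *	// ops->fcp_req_release() before the exchange may be reused
 */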

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}
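
/*
 * Example traddr strings accepted by the parser above (the WWN values
 * are illustrative):
 *
 *   long form:  "nn-0x20000090fa942779:pn-0x10000090fa942779"
 *   short form: "nn-20000090fa942779:pn-10000090fa942779"
 *
 * Both yield traddr->nn = 0x20000090fa942779 and
 * traddr->pn = 0x10000090fa942779.
 */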

static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_port_entry *pe;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */

	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
	if (ret)
		return ret;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return -ENOMEM;

	ret = -ENXIO;
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			/* a FC port can only be 1 nvmet port id */
			if (!tgtport->pe) {
				nvmet_fc_portentry_bind(tgtport, pe, port);
				ret = 0;
			} else
				ret = -EALREADY;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	if (ret)
		kfree(pe);

	return ret;
}

static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	struct nvmet_fc_port_entry *pe = port->priv;

	nvmet_fc_portentry_unbind(pe);

	kfree(pe);
}

static void
nvmet_fc_discovery_chg(struct nvmet_port *port)
{
	struct nvmet_fc_port_entry *pe = port->priv;
	struct nvmet_fc_tgtport *tgtport = pe->tgtport;

	if (tgtport && tgtport->ops->discovery_event)
		tgtport->ops->discovery_event(&tgtport->fc_target_port);
}

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_FC,
	.msdbd			= 1,
	.add_port		= nvmet_fc_add_port,
	.remove_port		= nvmet_fc_remove_port,
	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
	.delete_ctrl		= nvmet_fc_delete_ctrl,
	.discovery_chg		= nvmet_fc_discovery_chg,
};

static int __init nvmet_fc_init_module(void)
{
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}

static void __exit nvmet_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_LICENSE("GPL v2");