// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics Fibre Channel target transport.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures *************************** */


#define NVMET_LS_CTX_COUNT		256

/* assume LS request/response payloads fit in a single small frame */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

/* desired maximum for a single sequence - if the sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;
};

struct nvmet_fc_tgtport {

	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list;
	struct device			*dev;
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_fc_port_entry	*pe;
	struct kref			ref;
	u32				max_sg_cnt;
};

struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_port		*port;
	u64				node_name;
	u64				port_name;
	struct list_head		pe_list;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}

/*
 * Association and connection IDs:
 *
 * An association ID carries a random value in its upper bytes and
 * zeroes in its low-order BYTES_FOR_QID bytes. A connection ID is the
 * association ID with the queue id placed in those low-order bytes,
 * so the association ID doubles as the connection ID for queue 0.
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
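
/*
 * Worked example of the connection ID layout (illustrative values only,
 * not taken from a trace): with BYTES_FOR_QID == 2, an association might
 * be assigned
 *
 *	association_id = 0x123456789abc0000	(low 16 bits always zero)
 *
 * and nvmet_fc_makeconnid(assoc, 3) then yields
 *
 *	connection_id  = 0x123456789abc0003
 *
 * from which nvmet_fc_getassociationid() recovers 0x123456789abc0000 by
 * masking with ~NVMET_FC_QUEUEID_MASK, and nvmet_fc_getqueueid() recovers
 * qid 3 by masking with NVMET_FC_QUEUEID_MASK.
 */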

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
				 fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * An LLDD may register with a NULL struct device (the fcloop test
 * driver is one such case), in which case no real DMA mapping can be
 * done. These wrappers degrade to no-ops / identity mappings when
 * dev == NULL and forward to the regular dma_* routines otherwise.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call for a NULL device: fill in identity mappings */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
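
/*
 * Minimal usage sketch of the wrappers above (hypothetical buffer and
 * length; this mirrors how the LS response buffers are mapped later in
 * this file). The caller never special-cases a NULL device itself:
 *
 *	dma_addr_t dma = fc_dma_map_single(tgtport->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	if (fc_dma_mapping_error(tgtport->dev, dma))
 *		return -EFAULT;	// never taken for a NULL dev
 *	...
 *	fc_dma_unmap_single(tgtport->dev, dma, len, DMA_TO_DEVICE);
 */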


/* ****************** FC-NVME LS/FCP context management ******************* */

static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
			GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	/*
	 * after the unwind loop, iod points one element before the
	 * array base; free the array via the saved base pointer.
	 */
	kfree(tgtport->iod);

	return -EFAULT;
}
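
/*
 * Buffer layout note (sketch of the allocation above): each LS context
 * gets a single 2 * NVME_FC_MAX_LS_BUFFER_SIZE allocation, split in half:
 *
 *	rqstbuf: bytes [0 .. SIZE-1]        received LS request payload
 *	rspbuf:  bytes [SIZE .. 2*SIZE-1]   LS response to be sent
 *
 * Only rspbuf is DMA-mapped (DMA_TO_DEVICE): the request payload is
 * memcpy'd in by nvmet_fc_rcv_ls_req(), while the response is pushed to
 * the wire by the LLDD. Freeing rqstbuf releases both halves.
 */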

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The fod
		 * inherits that reference, and nvmet_fc_free_fcp_iod()
		 * drops it again.
		 */
	}
	return fod;
}


static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis.
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
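
/*
 * hwqid mapping, worked example (hypothetical sizes): with
 * ops->max_hw_queues == 4, nvme queue ids map as
 *
 *	qid 0 (admin)   -> hwqid 0
 *	qid 1..4 (io)   -> hwqid 0,1,2,3	((qid - 1) % 4)
 *	qid 5..8 (io)   -> hwqid 0,1,2,3
 *
 * so multiple nvme queues may share an LLDD hardware queue once the
 * host creates more io queues than the LLDD exposes.
 */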

static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred work and allow pending command if possible */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in the fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup the new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform the LLDD the IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when the fod was
	 * originally allocated: it now covers the deferred command.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc((sizeof(*queue) +
				(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
				GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}
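
/*
 * Allocation layout note: the queue and its fod array come from one
 * kzalloc of sizeof(*queue) + sqsize * sizeof(struct nvmet_fc_fcp_iod),
 * so "&queue[1]" is simply the first byte past the queue struct:
 *
 *	[ struct nvmet_fc_tgt_queue | fod[0] | fod[1] | ... | fod[sqsize-1] ]
 *
 * The __aligned(sizeof(unsigned long long)) on the queue struct keeps
 * the trailing fod array suitably aligned.
 */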


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}


static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i, writedataactive;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			writedataactive = fod->writedataactive;
			spin_unlock(&fod->flock);
			/*
			 * only call the lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (writedataactive) {
				spin_lock(&fod->flock);
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			}
		}
	}

	/* Cleanup defer'ed IOs in the queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

static void
nvmet_fc_delete_assoc(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);

	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);

	/*
	 * pick a random association id, with the low BYTES_FOR_QID bytes
	 * left zero so queue ids can be OR'd in later. Retry until the
	 * id is unique on this targetport.
	 */
	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida_put:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}

static void
nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_port_entry *pe,
			struct nvmet_port *port)
{
	lockdep_assert_held(&nvmet_fc_tgtlock);

	pe->tgtport = tgtport;
	tgtport->pe = pe;

	pe->port = port;
	port->priv = pe;

	pe->node_name = tgtport->fc_target_port.node_name;
	pe->port_name = tgtport->fc_target_port.port_name;
	INIT_LIST_HEAD(&pe->pe_list);

	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
}

static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
{
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (pe->tgtport)
		pe->tgtport->pe = NULL;
	list_del(&pe->pe_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe)
		pe->tgtport = NULL;
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a new targetport is registered. Checks whether the nvmet
 * layer was already configured for the targetport's wwn's - i.e. the
 * targetport existed, the nvmet port was configured against it, the
 * LLDD deregistered the targetport, and is now registering it again -
 * and if so, re-links the port entry to the new targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entry points and operating parameters for the port
 * @dev:       physical hardware device to be used for DMA mappings
 * @portptr:   pointer to a targetport pointer, set on successful return
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = template->max_sgl_segments;

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	nvmet_fc_portentry_rebind_tgt(newrec);

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		if (!schedule_work(&assoc->del_work))
			/* already deleting - release local reference */
			nvmet_fc_tgt_a_put(assoc);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

/*
 * nvmet layer has requested controller deletion: locate the
 * association behind it and schedule the association for deletion.
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			if (!schedule_work(&assoc->del_work))
				/* already deleting - release local reference */
				nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	nvmet_fc_portentry_unbind_tgt(tgtport);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* *********************** FC-NVME LS Handling **************************** */


static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}

static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Bad RS R_CTL",
	"Bad RS Relative Offset",
};

static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * Initiators have been seen sending differing trailing-pad
	 * lengths for the Create Association command descriptor, so
	 * only minimum lengths are enforced here: accept anything of
	 * at least "minimum" length and ignore how long the trailing
	 * pad is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst =
				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_conn_acc *acc =
				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}

static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_rqst *rqst =
			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
	struct fcnvme_ls_disconnect_acc *acc =
			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_assoc *assoc;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
		ret = VERR_DISCONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_rqst)))
		ret = VERR_DISCONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->discon_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
		ret = VERR_DISCONN_CMD;
	else if (rqst->discon_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
		ret = VERR_DISCONN_CMD_LEN;
	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
		 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
		ret = VERR_DISCONN_SCOPE;
	else {
		/* match an active association */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (!assoc)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					(ret == VERR_NO_CONN) ?
						FCNVME_RJT_RC_INV_CONN :
						FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_acc)),
			FCNVME_LS_DISCONNECT);

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(iod->assoc);

	nvmet_fc_delete_target_assoc(iod->assoc);
}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
{
	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 =
			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;

	iod->lsreq->nvmet_fc_private = iod;
	iod->lsreq->rspbuf = iod->rspbuf;
	iod->lsreq->rspdma = iod->rspdma;
	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
	/* be preventative: handlers will set a valid length later */
	iod->lsreq->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * parse request input, execute the request, and format the
	 * LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT:
		/* Terminate a Queue/Connection or the Association */
		nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	nvmet_fc_xmt_ls_rsp(tgtport, iod);
}

/*
 * work context wrapper for the LS request handler above
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}


/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvmet-fc layer copies the payload to an internal structure for
 * processing. As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port:  pointer to the (registered) target port the LS was
 *                received on.
 * @lsreq:        pointer to a lsreq request structure to be used to
 *                reference the exchange corresponding to the LS.
 * @lsreqbuf:     pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_ls_req *lsreq,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;

	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
		return -E2BIG;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsreq = lsreq;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;

	schedule_work(&iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);


/*
 * **********************
 * Start of FCP handling
 * **********************
 */

static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	unsigned int nent;

	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
	if (!sg)
		goto out;

	fod->data_sg = sg;
	fod->data_sg_cnt = nent;
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
				/* note: write from initiator perspective */
	fod->next_sg = fod->data_sg;

	return 0;

out:
	return NVME_SC_INTERNAL;
}

static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	if (!fod->data_sg || !fod->data_sg_cnt)
		return;

	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
	sgl_free(fod->data_sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
}


static bool
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
{
	u32 sqtail, used;

	/* egad, this is ugly. And sqtail is just a best guess */
	sqtail = atomic_read(&q->sqtail) % q->sqsize;

	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
}
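
/*
 * queue_90percent_full(), worked example (illustrative numbers): with
 * sqsize = 32, sqhd = 4 and a raw sqtail counter of 33 (33 % 32 = 1),
 * the used count is 1 + 32 - 4 = 29 entries, and 29 * 10 = 290 >=
 * (32 - 1) * 9 = 279, so the queue is treated as ~90% full and a full
 * ERSP is sent to refresh the host's SQHD. sqtail here is only a
 * best-effort estimate; the counter is not advanced under qlock.
 */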

/*
 * Prep the RSP payload.
 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op.
 */
static void
nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &ersp->cqe;
	u32 *cqewd = (u32 *)cqe;
	bool send_ersp = false;
	u32 rsn, rspcnt, xfr_length;

	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
		xfr_length = fod->req.transfer_len;
	else
		xfr_length = fod->offset;

	/*
	 * Check whether a zero-filled (short) response can be sent: the
	 * host can reconstruct the CQE itself from the known sq id, the
	 * last SQHD it saw in an ersp, and the command_id. A full
	 * extended response (ersp) must be sent whenever the CQE would
	 * not match that reconstruction:
	 *   every N responses, where N = ersp_ratio
	 *   fabrics commands
	 *   transferred data length not equal to the cmd iu length
	 *   non-zero status, or non-zero result words 0/1
	 *   fused commands
	 *   the SQ is ~90% or more full
	 */
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    sqe->opcode == nvme_fabrics_command ||
	    xfr_length != fod->req.transfer_len ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
		send_ersp = true;

	/* re-set the fields */
	fod->fcpreq->rspaddr = ersp;
	fod->fcpreq->rspdma = fod->rspdma;

	if (!send_ersp) {
		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
	} else {
		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
		rsn = atomic_inc_return(&fod->queue->rsn);
		ersp->rsn = cpu_to_be32(rsn);
		ersp->xfrd_len = cpu_to_be32(xfr_length);
		fod->fcpreq->rsplen = sizeof(*ersp);
	}

	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}
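
/*
 * ERSP pacing, worked example (illustrative): with ersp_ratio = 8 the
 * transport sends a full extended response for every 8th completion on
 * the queue (rspcnt % 8 == 0) and a zero-filled short response
 * otherwise - unless one of the override conditions above (fabrics
 * command, non-zero status/result words, fused command, short transfer,
 * or a ~90% full SQ) forces a full ERSP so the host can rebuild an
 * accurate CQE and SQHD.
 */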

static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);

static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	/*
	 * if an ABTS was received or we issued the fcp_abort early,
	 * don't call the abort routine again. No lock is needed: the
	 * aborted flag was set in a path already serialized with
	 * this one.
	 */
	if (!fod->aborted)
		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);

	nvmet_fc_free_fcp_iod(fod->queue, fod);
}

static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	int ret;

	fod->fcpreq->op = NVMET_FCOP_RSP;
	fod->fcpreq->timeout = 0;

	nvmet_fc_prep_fcp_rsp(tgtport, fod);

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret)
		nvmet_fc_abort_op(tgtport, fod);
}

static void
nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct scatterlist *sg = fod->next_sg;
	unsigned long flags;
	u32 remaininglen = fod->req.transfer_len - fod->offset;
	u32 tlen = 0;
	int ret;

	fcpreq->op = op;
	fcpreq->offset = fod->offset;
	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;

	/*
	 * for the next sequence:
	 *  break at an sg element boundary
	 *  attempt to keep the sequence length capped at
	 *    NVMET_FC_MAX_SEQ_LENGTH, but allow the sequence to
	 *    be longer if a single sg element is larger than that
	 *    amount. This avoids building a new sg list for the
	 *    tgtport api.
	 */
	fcpreq->sg = sg;
	fcpreq->sg_cnt = 0;
	while (tlen < remaininglen &&
	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
		fcpreq->sg_cnt++;
		tlen += sg_dma_len(sg);
		sg = sg_next(sg);
	}
	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
		fcpreq->sg_cnt++;
		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
		sg = sg_next(sg);
	}
	if (tlen < remaininglen)
		fod->next_sg = sg;
	else
		fod->next_sg = NULL;

	fcpreq->transfer_length = tlen;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->rsplen = 0;

	/*
	 * If the last READDATA request: check if the LLDD supports
	 * a combined xfr with response.
	 */
	if ((op == NVMET_FCOP_READDATA) &&
	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
		fcpreq->op = NVMET_FCOP_READDATA_RSP;
		nvmet_fc_prep_fcp_rsp(tgtport, fod);
	}

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret) {
		/*
		 * should be ok to set w/o lock as it's in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
		fod->abort = true;

		if (op == NVMET_FCOP_WRITEDATA) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = false;
			spin_unlock_irqrestore(&fod->flock, flags);
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
		} else {
			fcpreq->fcp_error = ret;
			fcpreq->transferred_length = 0;
			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
		}
	}
}

static inline bool
__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	/* if in the middle of an io and we need to tear down */
	if (abort) {
		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return true;
		}

		nvmet_fc_abort_op(tgtport, fod);
		return true;
	}

	return false;
}

/*
 * actual done handler for FCP operations when completed by the lldd
 */
static void
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	fod->writedataactive = false;
	spin_unlock_irqrestore(&fod->flock, flags);

	switch (fcpreq->op) {

	case NVMET_FCOP_WRITEDATA:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			spin_lock(&fod->flock);
			fod->abort = true;
			spin_unlock(&fod->flock);

			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
			return;
		}

		/* data transfer complete, resume with nvmet layer */
		nvmet_req_execute(&fod->req);
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			nvmet_fc_abort_op(tgtport, fod);
			return;
		}

		/* success */

		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);
			nvmet_fc_free_fcp_iod(fod->queue, fod);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* data transfer complete, send response */

		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_xmt_fcp_rsp(tgtport, fod);

		break;

	case NVMET_FCOP_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		nvmet_fc_free_fcp_iod(fod->queue, fod);
		break;

	default:
		break;
	}
}

static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	nvmet_fc_fod_op_done(fod);
}

/*
 * actual completion handler after execution by the nvmet layer
 */
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if we have a CQE, snoop the last sq_head value */
	if (!status)
		fod->queue->sqhd = cqe->sq_head;

	if (abort) {
		nvmet_fc_abort_op(tgtport, fod);
		return;
	}

	/* if an error occurred handling the cmd post initial parsing */
	if (status) {
		/* fudge up a failed CQE status for our transport error */
		memset(cqe, 0, sizeof(*cqe));
		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
		cqe->sq_id = cpu_to_le16(fod->queue->qid);
		cqe->command_id = sqe->command_id;
		cqe->status = cpu_to_le16(status);
	} else {
		/*
		 * try to push the data even if the SQE status is non-zero.
		 * There may be a status where data still was intended to
		 * be moved.
		 */
		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
			/* push the data over before sending the rsp */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* writes & no data - fall thru */
	}

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}


static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
{
	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
}

/*
 * Actual processing routine for received FC-NVME I/O Requests from
 * the LLD
 */
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
	int ret;

	/*
	 * if there is no nvmet mapping to the targetport there
	 * shouldn't be requests. just terminate them.
	 */
	if (!tgtport->pe)
		goto transport_error;

	/*
	 * Fused commands are currently not supported in the linux
	 * implementation, so the transport does not examine fused
	 * commands or order delivery to the upper layer until both
	 * matching commands are present.
	 */
	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;

	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
		fod->io_dir = NVMET_FCP_WRITE;
		if (!nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
		fod->io_dir = NVMET_FCP_READ;
		if (nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else {
		fod->io_dir = NVMET_FCP_NODATA;
		if (xfrlen)
			goto transport_error;
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.rsp = &fod->rspiubuf.cqe;
	fod->req.port = tgtport->pe->port;

	/* clear any response payload */
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;

	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
	if (!ret) {
		/* bad SQE content or invalid ctrl state */
		/* nvmet layer has already called op done to send rsp. */
		return;
	}

	fod->req.transfer_len = xfrlen;

	/* keep a running counter of tail position */
	atomic_inc(&fod->queue->sqtail);

	if (fod->req.transfer_len) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
	}
	fod->req.sg = fod->data_sg;
	fod->req.sg_cnt = fod->data_sg_cnt;
	fod->offset = 0;

	if (fod->io_dir == NVMET_FCP_WRITE) {
		/* pull the data over before invoking the nvmet layer */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
		return;
	}

	/*
	 * Reads or no data:
	 *
	 * can invoke the nvmet layer now. If read data, cmd completion
	 * will push the data.
	 */
	nvmet_req_execute(&fod->req);
	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod);
}

/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                       upon the reception of a FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet-fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the cmd iu
 * buffer to the job structure. As such, on a successful completion
 * (return 0), the LLDD may immediately free/reuse the CMD IU buffer.
 *
 * Due to the packetized nature of FC, it is possible for a command to
 * arrive before the LLDD and nvmet-fc have recycled the job structure
 * of a prior completion, giving the appearance of more commands than
 * fit in the sq. To handle this, if the LLDD supports defer_rcv(), the
 * exchange and CMD IU buffer information are remembered on a pending
 * list and -EOVERFLOW is returned. When a job structure frees up, it is
 * immediately reused for the pending command and the LLDD's defer_rcv()
 * callback is invoked, informing the LLDD that it may now reuse the
 * CMD IU buffer.
 *
 * On any other non-zero return, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to
 *               reference the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
			(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
			be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	spin_lock_irqsave(&queue->qlock, flags);

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (fod) {
		spin_unlock_irqrestore(&queue->qlock, flags);

		fcpreq->nvmet_fc_private = fod;
		fod->fcpreq = fcpreq;

		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

		return 0;
	}

	if (!tgtport->ops->defer_rcv) {
		spin_unlock_irqrestore(&queue->qlock, flags);
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
			struct nvmet_fc_defer_fcp_req, req_list);
	if (deferfcp) {
		/* Just re-use one that was previously allocated */
		list_del(&deferfcp->req_list);
	} else {
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
		if (!deferfcp) {
			/* release the queue lookup reference */
			nvmet_fc_tgt_q_put(queue);
			return -ENOMEM;
		}
		spin_lock_irqsave(&queue->qlock, flags);
	}

	/* For now, use rspaddr / rsplen to save payload information */
	fcpreq->rspaddr = cmdiubuf;
	fcpreq->rsplen = cmdiubuf_len;
	deferfcp->fcp_req = fcpreq;

	/* defer processing till a fod becomes available */
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);

	/* NOTE: the queue lookup reference is still valid */

	spin_unlock_irqrestore(&queue->qlock, flags);

	return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
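
/*
 * Illustrative LLDD-side handling of the return codes above (sketch
 * only; "cmd", reuse_cmdiu_buffer() and abort_exchange() are
 * hypothetical LLDD constructs, not part of this API):
 *
 *	ret = nvmet_fc_rcv_fcp_req(targetport, &cmd->tgt_fcp_req,
 *				   cmd->cmdiu_buf, cmd->cmdiu_len);
 *	if (ret == 0)
 *		reuse_cmdiu_buffer(cmd);  // copied by the transport
 *	else if (ret == -EOVERFLOW)
 *		;  // keep the buffer until defer_rcv() fires
 *	else
 *		abort_exchange(cmd);      // command not accepted
 */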

/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                       upon the reception of an ABTS for a FCP command.
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * transport will wait for any outstanding work to finish, terminate the
 * io with error, and release the exchange via fcp_req_release().
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to the fcpreq request structure corresponding to
 *               the exchange that received the ABTS.
 */
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;

	if (!fod || fod->fcpreq != fcpreq)
		/* job appears to have already completed, ignore abort */
		return;

	queue = fod->queue;

	spin_lock_irqsave(&queue->qlock, flags);
	if (fod->active) {
		/*
		 * mark as abort. The abort handler, invoked upon completion
		 * of any work, will detect the aborted status and do the
		 * callback.
		 */
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);


struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine the number base, universally
 * build a string to parse with a 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if the string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}
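
/*
 * Accepted traddr formats, by example (illustrative WWNs): either the
 * fixed-width form with 0x prefixes
 *
 *	nn-0x20000090fa942779:pn-0x10000090fa942779
 *
 * or the minimal form without them
 *
 *	nn-20000090fa942779:pn-10000090fa942779
 *
 * Both parse to traddr->nn = 0x20000090fa942779 and
 * traddr->pn = 0x10000090fa942779; anything else is rejected with
 * -EINVAL.
 */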

static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_port_entry *pe;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */

	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
	if (ret)
		return ret;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return -ENOMEM;

	ret = -ENXIO;
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			/* a FC port can only be 1 nvmet port id */
			if (!tgtport->pe) {
				nvmet_fc_portentry_bind(tgtport, pe, port);
				ret = 0;
			} else
				ret = -EALREADY;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	if (ret)
		kfree(pe);

	return ret;
}

static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	struct nvmet_fc_port_entry *pe = port->priv;

	nvmet_fc_portentry_unbind(pe);

	kfree(pe);
}

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_FC,
	.msdbd			= 1,
	.add_port		= nvmet_fc_add_port,
	.remove_port		= nvmet_fc_remove_port,
	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
	.delete_ctrl		= nvmet_fc_delete_ctrl,
};

static int __init nvmet_fc_init_module(void)
{
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}

static void __exit nvmet_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_LICENSE("GPL v2");