/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

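/*
 * Per-request context shared between the host and target sides of the
 * loopback path: it carries the host-side SQE/CQE (cmd/rsp) next to the
 * target-side nvmet_req, so a command is handed from the host core to
 * the target core without ever leaving the kernel.  first_sgl is a
 * variable-sized array at the end of the blk-mq PDU; space for it is
 * reserved via the tag set's cmd_size.
 */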
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

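/*
 * ->queue_response callback: invoked by the nvmet core once the target
 * side has completed a command.  Normal completions are matched back to
 * their blk-mq request via the CQE's command_id; AEN completions are
 * special-cased because they never had a blk-mq tag to begin with.
 */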
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are submitted outside the blk-mq tag space with a
	 * command_id of NVME_AQ_BLK_MQ_DEPTH (or above), so they cannot be
	 * looked up via blk_mq_tag_to_rq() and are handed straight to the
	 * host core's async event handler instead.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	nvmet_req_execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_DONE;
}

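/*
 * Host-side ->queue_rq: translate the blk-mq request into an NVMe command,
 * wrap it in an nvmet_req, map the data into a chained scatterlist and hand
 * it to the target core from a work item.  The request then completes
 * asynchronously through nvme_loop_queue_response().
 */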
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

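/*
 * AEN submission bypasses blk-mq entirely: the dedicated async_event_iod is
 * given command_id NVME_AQ_BLK_MQ_DEPTH, which is how its completion is
 * recognized in nvme_loop_queue_response().
 */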
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

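/*
 * blk-mq PDU setup: wire the embedded command/response buffers into the
 * nvmet_req and record which loop queue the request belongs to.  Requests
 * from the admin tag set map to queue 0, requests from the I/O tag set to
 * queues 1..n (hctx_idx + 1).
 */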
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	nvme_req(req)->ctrl = &ctrl->ctrl;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

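/*
 * Admin queue bring-up: allocate the admin tag set and request queue, set up
 * the target-side SQ, issue the fabrics Connect, then read CAP, enable the
 * controller and run identify, following the usual fabrics initialization
 * sequence.
 */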
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

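/*
 * Teardown: quiesce the queues, cancel any requests still sitting in the
 * tag sets, destroy the target-side queues and, if the controller was live,
 * perform an orderly shutdown before the admin queue itself is destroyed.
 */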
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

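/*
 * Controller reset: tear everything down, reconnect the admin queue,
 * re-create and reconnect the I/O queues and move the controller back to
 * LIVE.  On any failure the controller is removed rather than retried.
 */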
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

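/*
 * I/O queue creation: initialize the target-side SQs, allocate the shared
 * I/O tag set and the connect queue, then issue a fabrics Connect on every
 * I/O queue.
 */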
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

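/*
 * ->create_ctrl for the "loop" transport: allocate the controller, bind it
 * to a local nvmet port, bring up the admin queue and any I/O queues, add
 * it to the global controller list and start it.
 */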
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

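/*
 * Module init registers the target-side transport and then the host-side
 * fabrics transport; cleanup unregisters both, deletes any controllers
 * still on the list and flushes the delete workqueue.
 */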
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */