// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

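/*
 * Queue 0 is the admin queue and is served by the admin tag set; all
 * other queues belong to the I/O tag set, whose tags array is indexed
 * from zero, hence the "queue_idx - 1".
 */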
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

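/*
 * Target-side completion path: route the CQE either to the host's async
 * event handler or back to the block request it completes.
 */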
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
			nvme_loop_complete_rq(rq);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

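/*
 * Host-side ->queue_rq: translate the block request directly into an
 * nvmet request and execute it from a work item, so the "transport" is
 * nothing more than a function call within the same kernel.
 */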
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

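/*
 * Async Event Requests have no struct request behind them, so build the
 * command by hand and submit it on the admin queue's target side.
 */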
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

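/*
 * I/O tag sets map hardware queue index N to ctrl->queues[N + 1], since
 * slot 0 of the queue array is reserved for the admin queue.
 */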
static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = &ctrl->ctrl;
	nvme_req(req)->cmd = &iod->cmd;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static struct lock_class_key loop_hctx_fq_lock_key;

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	/*
	 * flush_end_io() can be called recursively for us, so use our own
	 * lock class key for avoiding lockdep possible recursive locking,
	 * then we can remove the dynamically allocated lock class for each
	 * flush queue, that way may cause horrible lockdep warning.
	 */
	blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

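/*
 * Tear down the admin queue: the target-side SQ first, then the host-side
 * block queues and tag set. The LIVE bit prevents tearing down a queue
 * that was never brought up or was already destroyed.
 */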
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
	ctrl->ctrl.queue_count = 1;
}

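/*
 * Negotiate the number of I/O queues (bounded by the connect options and
 * the number of online CPUs) and initialize a target-side SQ for each.
 */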
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

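/*
 * Admin queue bring-up: allocate the admin tag set and queues, connect
 * the admin queue, enable the controller and finish host-side
 * initialization. Errors unwind through the out_* labels in reverse order.
 */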
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
		error = PTR_ERR(ctrl->ctrl.fabrics_q);
		goto out_free_tagset;
	}

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_cleanup_fabrics_q;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	error = nvme_init_ctrl_finish(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

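/*
 * Cancel outstanding I/O and destroy the I/O queues, shut the controller
 * down if it is still live, then drop the admin queue last.
 */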
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		nvme_loop_destroy_io_queues(ctrl);
	}

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

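/*
 * Controller reset: tear everything down, then rebuild the admin and I/O
 * queues; if rebuilding fails the controller is uninitialized and removed.
 */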
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
			/* state change failure for non-deleted ctrl? */
			WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

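/*
 * Allocate the I/O tag set and connect queue, then send a fabrics
 * Connect on every granted I/O queue.
 */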
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

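/*
 * "Connect" entry point from the fabrics layer: allocate the controller,
 * bind it to a loop port, bring up the admin and (optionally) I/O queues
 * and mark the controller live.
 */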
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret) {
		kfree(ctrl);
		goto out;
	}

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		WARN_ON_ONCE(1);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
out:
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port. This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254");	/* 254 == NVMF_TRTYPE_LOOP */