// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics common host code.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"

static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

static struct nvmf_host *nvmf_default_host;

static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
{
	struct nvmf_host *host;

	list_for_each_entry(host, &nvmf_hosts, list) {
		if (!strcmp(host->nqn, hostnqn))
			return host;
	}

	return NULL;
}

static struct nvmf_host *nvmf_host_add(const char *hostnqn)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);
	host = __nvmf_host_find(hostnqn);
	if (host) {
		kref_get(&host->ref);
		goto out_unlock;
	}

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		goto out_unlock;

	kref_init(&host->ref);
	strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}

static struct nvmf_host *nvmf_host_default(void)
{
	struct nvmf_host *host;

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	kref_init(&host->ref);
	uuid_gen(&host->id);
	snprintf(host->nqn, NVMF_NQN_SIZE,
		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);

	mutex_lock(&nvmf_hosts_mutex);
	list_add_tail(&host->list, &nvmf_hosts);
	mutex_unlock(&nvmf_hosts_mutex);

	return host;
}

static void nvmf_host_destroy(struct kref *ref)
{
	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);

	mutex_lock(&nvmf_hosts_mutex);
	list_del(&host->list);
	mutex_unlock(&nvmf_hosts_mutex);

	kfree(host);
}

static void nvmf_host_put(struct nvmf_host *host)
{
	if (host)
		kref_put(&host->ref, nvmf_host_destroy);
}
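
/**
 * nvmf_get_address() - Format the controller's address options for display
 * @ctrl:	Host NVMe controller instance
 * @buf:	OUTPUT buffer receiving the comma-separated option string
 * @size:	Size of @buf in bytes
 *
 * Formats the traddr, trsvcid and host_traddr options that were used to
 * connect the controller. Returns the number of bytes written to @buf.
 */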
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	int len = 0;

	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
		len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
		len += scnprintf(buf + len, size - len, "%strsvcid=%s",
				(len) ? "," : "", ctrl->opts->trsvcid);
	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
		len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
				(len) ? "," : "", ctrl->opts->host_traddr);
	len += scnprintf(buf + len, size - len, "\n");

	return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);
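
/**
 * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin queue used
 *		to submit the property read command to the target.
 * @off:	Starting offset of the targeted property register (see the
 *		fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of the property
 *		after a successful read.
 *
 * Used by the host to retrieve a 32-bit capsule property value from an
 * NVMe controller on the target system.
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */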
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read32);
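
/**
 * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin queue used
 *		to submit the property read command to the target.
 * @off:	Starting offset of the targeted property register (see the
 *		fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of the property
 *		after a successful read.
 *
 * Used by the host to retrieve a 64-bit capsule property value from an
 * NVMe controller on the target system; the attrib field selects an
 * 8-byte access.
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */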
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);
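
/**
 * nvmf_reg_write32() - NVMe Fabrics "Property Set" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin queue used
 *		to submit the property write command to the target.
 * @off:	Starting offset of the targeted property register (see the
 *		fabrics section of the NVMe standard).
 * @val:	Input parameter that contains the value to be written.
 *
 * Used by the host to write a 32-bit capsule property value to an NVMe
 * controller on the target system.
 *
 * Return:
 *	0: successful write
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */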
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct nvme_command cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_set.opcode = nvme_fabrics_command;
	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
	cmd.prop_set.attrib = 0;
	cmd.prop_set.offset = cpu_to_le32(off);
	cmd.prop_set.value = cpu_to_le64(val);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);
	if (unlikely(ret))
		dev_err(ctrl->device,
			"Property Set error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
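
/**
 * nvmf_log_connect_error() - Decode and log a failed Connect command.
 * @ctrl:	The specific /dev/nvmeX device that had the error.
 * @errval:	Error code set from the Connect command.
 * @offset:	For Connect Invalid Parameter errors, encodes the offset of
 *		the rejected field: the low 16 bits give the byte offset,
 *		and a non-zero high half indicates the Connect data block
 *		rather than the SQE.
 * @cmd:	The SQE portion of the Connect command that was sent.
 * @data:	The data portion of the Connect command that was sent.
 */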
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	int err_sctype = errval & (~NVME_SC_DNR);

	switch (err_sctype) {

	case (NVME_SC_CONNECT_INVALID_PARAM):
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, data->cntlid);
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_data, offset & 0xffff);
				break;
			}
		} else {
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
					"%s, qid %d\n",
					inv_sqe, cmd->connect.qid);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;

	case NVME_SC_CONNECT_INVALID_HOST:
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;

	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;

	case NVME_SC_CONNECT_FORMAT:
		dev_err(ctrl->device,
			"Connect incompatible format: %d\n",
			cmd->connect.recfmt);
		break;

	default:
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	}
}
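
/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect" API function.
 * @ctrl:	Host nvme controller instance used to request a new NVMe
 *		controller allocation on the target system and establish an
 *		NVMe Admin connection to that controller.
 *
 * Sends a fabrics-protocol Connect command on the admin queue to allocate
 * an NVMe controller on the target and establish the Admin queue
 * connection; on success the allocated controller ID is stored in
 * ctrl->cntlid.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */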
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd;
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = 0;
	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
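
	/*
	 * Set keep-alive timeout in seconds granularity (ms * 1000)
	 * and add a grace period for controller KATO enforcement.
	 */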
	cmd.connect.kato = ctrl->kato ?
		cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000) : 0;

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(0xffff);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
			data, sizeof(*data), 0, NVME_QID_ANY, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	ctrl->cntlid = le16_to_cpu(res.u16);

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
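
/**
 * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect" API function.
 * @ctrl:	Host nvme controller instance with an established Admin queue
 *		connection to the target.
 * @qid:	NVMe I/O queue number for the new connection between the host
 *		and target (must be greater than 0).
 * @poll:	Whether to poll for the command's completion.
 *
 * Sends a fabrics-protocol Connect command to establish an NVMe I/O queue
 * connection between the host and the previously allocated controller on
 * the target system.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */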
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
{
	struct nvme_command cmd;
	struct nvmf_connect_data *data;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = cpu_to_le16(qid);
	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(ctrl->cntlid);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), 0, qid, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
	}
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);

bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
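
/**
 * nvmf_register_transport() - NVMe Fabrics Library registration function.
 * @ops:	Transport ops instance to be registered to the
 *		common fabrics library.
 *
 * API function that registers the type of specific transport fabric
 * being implemented to the common NVMe fabrics library. Part of
 * the overall init sequence of starting up a fabrics driver.
 */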
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
	if (!ops->create_ctrl)
		return -EINVAL;

	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);
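
/**
 * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
 * @ops:	Transport ops instance to be unregistered from the
 *		common fabrics library.
 *
 * API function that unregisters the type of specific transport fabric
 * being implemented from the common NVMe fabrics library.
 * Part of the overall exit sequence of unloading the implemented driver.
 */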
void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_del(&ops->entry);
	up_write(&nvmf_transports_rwsem);
}
EXPORT_SYMBOL_GPL(nvmf_unregister_transport);

static struct nvmf_transport_ops *nvmf_lookup_transport(
		struct nvmf_ctrl_options *opts)
{
	struct nvmf_transport_ops *ops;

	lockdep_assert_held(&nvmf_transports_rwsem);

	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, opts->transport) == 0)
			return ops;
	}

	return NULL;
}
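
/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered. However,
 * if the controller is deleting or dead, or if the request is marked
 * failfast or belongs to a multipath device, fail it immediately with a
 * host path error so the upper layers can react.
 */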
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;

	nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_start_request(rq);
	nvme_complete_rq(rq);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);

bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);
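
	/*
	 * If the queue is not live, only commands generated inside the
	 * driver may be sent; filesystem I/O and user passthrough commands
	 * must wait for a live queue.
	 */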
	if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
		return false;
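
	/*
	 * Only allow commands on a live queue, except for the Connect
	 * command, which has to be able to bring the queue live while the
	 * controller is still connecting.
	 */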
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		if (nvme_is_fabrics(req->cmd) &&
		    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
			return true;
		break;
	default:
		break;
	case NVME_CTRL_DEAD:
		return false;
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvmf_check_ready);

static const match_table_t opt_tokens = {
	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
	{ NVMF_OPT_TRADDR,		"traddr=%s"		},
	{ NVMF_OPT_TRSVCID,		"trsvcid=%s"		},
	{ NVMF_OPT_NQN,			"nqn=%s"		},
	{ NVMF_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ NVMF_OPT_NR_IO_QUEUES,	"nr_io_queues=%d"	},
	{ NVMF_OPT_RECONNECT_DELAY,	"reconnect_delay=%d"	},
	{ NVMF_OPT_CTRL_LOSS_TMO,	"ctrl_loss_tmo=%d"	},
	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d"	},
	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s"		},
	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s"	},
	{ NVMF_OPT_HOST_ID,		"hostid=%s"		},
	{ NVMF_OPT_DUP_CONNECT,		"duplicate_connect"	},
	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow"	},
	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d"	},
	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
	{ NVMF_OPT_TOS,			"tos=%d"		},
	{ NVMF_OPT_ERR,			NULL			}
};

static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	size_t nqnlen = 0;
	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
	uuid_t hostid;
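
	/* Set defaults */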
	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
	opts->nr_io_queues = num_online_cpus();
	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
	opts->kato = NVME_DEFAULT_KATO;
	opts->duplicate_connect = false;
	opts->hdr_digest = false;
	opts->data_digest = false;
	opts->tos = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	uuid_gen(&hostid);

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_TRANSPORT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->transport);
			opts->transport = p;
			break;
		case NVMF_OPT_NQN:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->subsysnqn);
			opts->subsysnqn = p;
			nqnlen = strlen(opts->subsysnqn);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					opts->subsysnqn, NVMF_NQN_SIZE);
				ret = -EINVAL;
				goto out;
			}
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
			break;
		case NVMF_OPT_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->traddr);
			opts->traddr = p;
			break;
		case NVMF_OPT_TRSVCID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->trsvcid);
			opts->trsvcid = p;
			break;
		case NVMF_OPT_QUEUE_SIZE:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < NVMF_MIN_QUEUE_SIZE ||
			    token > NVMF_MAX_QUEUE_SIZE) {
				pr_err("Invalid queue_size %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->queue_size = token;
			break;
		case NVMF_OPT_NR_IO_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid number of IOQs %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (opts->discovery_nqn) {
				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
				break;
			}

			opts->nr_io_queues = min_t(unsigned int,
						   num_online_cpus(), token);
			break;
		case NVMF_OPT_KATO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0) {
				pr_err("Invalid keep_alive_tmo %d\n", token);
				ret = -EINVAL;
				goto out;
			} else if (token == 0 && !opts->discovery_nqn) {
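				/* Allowed for debug purposes */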
				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
			}
			opts->kato = token;
			break;
		case NVMF_OPT_CTRL_LOSS_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0)
				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
			ctrl_loss_tmo = token;
			break;
		case NVMF_OPT_HOSTNQN:
			if (opts->host) {
				pr_err("hostnqn already user-assigned: %s\n",
				       opts->host->nqn);
				ret = -EADDRINUSE;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			nqnlen = strlen(p);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					p, NVMF_NQN_SIZE);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			nvmf_host_put(opts->host);
			opts->host = nvmf_host_add(p);
			kfree(p);
			if (!opts->host) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case NVMF_OPT_RECONNECT_DELAY:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid reconnect_delay %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->reconnect_delay = token;
			break;
		case NVMF_OPT_HOST_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_traddr);
			opts->host_traddr = p;
			break;
		case NVMF_OPT_HOST_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = uuid_parse(p, &hostid);
			if (ret) {
				pr_err("Invalid hostid %s\n", p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			kfree(p);
			break;
		case NVMF_OPT_DUP_CONNECT:
			opts->duplicate_connect = true;
			break;
		case NVMF_OPT_DISABLE_SQFLOW:
			opts->disable_sqflow = true;
			break;
		case NVMF_OPT_HDR_DIGEST:
			opts->hdr_digest = true;
			break;
		case NVMF_OPT_DATA_DIGEST:
			opts->data_digest = true;
			break;
		case NVMF_OPT_NR_WRITE_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_write_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_write_queues = token;
			break;
		case NVMF_OPT_NR_POLL_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_poll_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_poll_queues = token;
			break;
		case NVMF_OPT_TOS:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0) {
				pr_err("Invalid type of service %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (token > 255) {
				pr_warn("Clamping type of service to 255\n");
				token = 255;
			}
			opts->tos = token;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
				p);
			ret = -EINVAL;
			goto out;
		}
	}

	if (opts->discovery_nqn) {
		opts->nr_io_queues = 0;
		opts->nr_write_queues = 0;
		opts->nr_poll_queues = 0;
		opts->duplicate_connect = true;
	}
	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);

	if (!opts->host) {
		kref_get(&nvmf_default_host->ref);
		opts->host = nvmf_default_host;
	}

	uuid_copy(&opts->host->id, &hostid);

out:
	kfree(options);
	return ret;
}

static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
		unsigned int required_opts)
{
	if ((opts->mask & required_opts) != required_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & required_opts) &&
			    !(opt_tokens[i].token & opts->mask)) {
				pr_warn("missing parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts)
{
	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
	    strcmp(opts->traddr, ctrl->opts->traddr) ||
	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
		return false;
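
	/*
	 * Checking the local (host) address is rough: in most cases none is
	 * specified and the host port is selected by the network stack.
	 *
	 * Assume no match if:
	 * - a local address is specified on only one side, or
	 * - both sides specify one but the addresses differ.
	 */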
	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);

static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
		unsigned int allowed_opts)
{
	if (opts->mask & ~allowed_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & opts->mask) &&
			    (opt_tokens[i].token & ~allowed_opts)) {
				pr_warn("invalid parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
	nvmf_host_put(opts->host);
	kfree(opts->transport);
	kfree(opts->traddr);
	kfree(opts->trsvcid);
	kfree(opts->subsysnqn);
	kfree(opts->host_traddr);
	kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);

#define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
				 NVMF_OPT_DISABLE_SQFLOW)

static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
	struct nvme_ctrl *ctrl;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	ret = nvmf_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	request_module("nvme-%s", opts->transport);
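
	/*
	 * Check the generic options first, as we need a valid transport for
	 * the lookup below. Then clear the generic flags so that transport
	 * drivers don't have to care about them.
	 */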
	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
	if (ret)
		goto out_free_opts;
	opts->mask &= ~NVMF_REQUIRED_OPTS;

	down_read(&nvmf_transports_rwsem);
	ops = nvmf_lookup_transport(opts);
	if (!ops) {
		pr_info("no handler found for transport %s.\n",
			opts->transport);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!try_module_get(ops->module)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	up_read(&nvmf_transports_rwsem);

	ret = nvmf_check_required_opts(opts, ops->required_opts);
	if (ret)
		goto out_module_put;
	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
				ops->allowed_opts | ops->required_opts);
	if (ret)
		goto out_module_put;

	ctrl = ops->create_ctrl(dev, opts);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_module_put;
	}

	module_put(ops->module);
	return ctrl;

out_module_put:
	module_put(ops->module);
	goto out_free_opts;
out_unlock:
	up_read(&nvmf_transports_rwsem);
out_free_opts:
	nvmf_free_options(opts);
	return ERR_PTR(ret);
}

static struct class *nvmf_class;
static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);
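
/*
 * Writing an option string such as
 * "transport=loop,nqn=nqn.2014-08.org.nvmexpress.discovery" (see opt_tokens
 * above for the accepted options) to the nvme-fabrics misc device creates a
 * controller; a subsequent read returns its instance and cntlid via
 * nvmf_dev_show().
 */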
static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *pos)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl;
	const char *buf;
	int ret = 0;

	if (count > PAGE_SIZE)
		return -ENOMEM;

	buf = memdup_user_nul(ubuf, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&nvmf_dev_mutex);
	if (seq_file->private) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
	}

	seq_file->private = ctrl;

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	kfree(buf);
	return ret ? ret : count;
}

static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		ret = -EINVAL;
		goto out_unlock;
	}

	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
			ctrl->instance, ctrl->cntlid);

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	return ret;
}

static int nvmf_dev_open(struct inode *inode, struct file *file)
{
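	/*
	 * The miscdevice code initializes file->private_data, but doesn't
	 * make use of it later.
	 */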
	file->private_data = NULL;
	return single_open(file, nvmf_dev_show, NULL);
}

static int nvmf_dev_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl = seq_file->private;

	if (ctrl)
		nvme_put_ctrl(ctrl);
	return single_release(inode, file);
}

static const struct file_operations nvmf_dev_fops = {
	.owner		= THIS_MODULE,
	.write		= nvmf_dev_write,
	.read		= seq_read,
	.open		= nvmf_dev_open,
	.release	= nvmf_dev_release,
};

static struct miscdevice nvmf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "nvme-fabrics",
	.fops		= &nvmf_dev_fops,
};

static int __init nvmf_init(void)
{
	int ret;

	nvmf_default_host = nvmf_host_default();
	if (!nvmf_default_host)
		return -ENOMEM;

	nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
	if (IS_ERR(nvmf_class)) {
		pr_err("couldn't register class nvme-fabrics\n");
		ret = PTR_ERR(nvmf_class);
		goto out_free_host;
	}

	nvmf_device =
		device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
	if (IS_ERR(nvmf_device)) {
		pr_err("couldn't create nvme-fabrics device!\n");
		ret = PTR_ERR(nvmf_device);
		goto out_destroy_class;
	}

	ret = misc_register(&nvmf_misc);
	if (ret) {
		pr_err("couldn't register misc device: %d\n", ret);
		goto out_destroy_device;
	}

	return 0;

out_destroy_device:
	device_destroy(nvmf_class, MKDEV(0, 0));
out_destroy_class:
	class_destroy(nvmf_class);
out_free_host:
	nvmf_host_put(nvmf_default_host);
	return ret;
}

static void __exit nvmf_exit(void)
{
	misc_deregister(&nvmf_misc);
	device_destroy(nvmf_class, MKDEV(0, 0));
	class_destroy(nvmf_class);
	nvmf_host_put(nvmf_default_host);

	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
}

MODULE_LICENSE("GPL v2");

module_init(nvmf_init);
module_exit(nvmf_exit);