// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq host controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static struct class *nvme_ns_chr_class;

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);

static void nvme_update_bdev_size(struct gendisk *disk)
{
	struct block_device *bdev = bdget_disk(disk, 0);

	if (bdev) {
		bd_set_nr_sectors(bdev, get_capacity(disk));
		bdput(bdev);
	}
}

/*
 * Prepare a queue for teardown.
 *
 * This must forcibly unquiesce queues to avoid blocking dispatch, and only
 * set the capacity to 0 after that to avoid blocking dispatchers that may be
 * holding bd_mutex.  This will end buffered writers dirtying pages that
 * can't be synced.
 */
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;

	blk_set_queue_dying(ns->queue);
	blk_mq_unquiesce_queue(ns->queue);

	set_capacity(ns->disk, 0);
	nvme_update_bdev_size(ns->disk);
}

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only new queue scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() complete,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

static blk_status_t nvme_error_status(u16 status)
{
	if (unlikely(status & NVME_SC_DNR))
		return BLK_STS_TARGET;

	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}
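
/*
 * Worked example for the CRD handling above (illustrative numbers, not
 * from any particular controller): a completion status with CRD field
 * 0b10 selects crdt[1] from Identify Controller, which is specified in
 * units of 100 milliseconds.  With crdt[1] == 30 the requeue list kick is
 * therefore delayed by 30 * 100 = 3000 milliseconds.
 */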

enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;

	/*
	 * Never retry if the caller asked for fail-fast behaviour at the
	 * device or driver level, if the controller set the Do Not Retry
	 * bit, or if the command has already used up its retry budget.
	 */
	if ((req->cmd_flags & (REQ_FAILFAST_DEV | REQ_FAILFAST_DRIVER)) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;

	if (req->cmd_flags & (REQ_NVME_MPATH | REQ_FAILFAST_TRANSPORT)) {
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
			return FAILOVER;
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
	}

	return RETRY;
}

static inline void __nvme_end_req(struct request *req, blk_status_t status)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND) {
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));
	}
	nvme_trace_bio_complete(req);
	blk_mq_end_request(req, status);
}

static inline void nvme_end_req(struct request *req)
{
	__nvme_end_req(req, nvme_error_status(nvme_req(req)->status));
}

static inline void nvme_end_req_with_failover(struct request *req)
{
	u16 nvme_status = nvme_req(req)->status;
	blk_status_t status = nvme_error_status(nvme_status);

	if (unlikely(nvme_status & NVME_SC_DNR))
		goto out;

	nvme_update_ana(req);

	if (!blk_path_error(status)) {
		pr_debug("Request meant for failover but blk_status_t (errno=%d) was not retryable.\n",
			 blk_status_to_errno(status));
		status = BLK_STS_IOERR;
	}
out:
	__nvme_end_req(req, status);
}

void nvme_complete_rq(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		if (req->cmd_flags & REQ_NVME_MPATH)
			nvme_failover_req(req);
		else
			nvme_end_req_with_failover(req);
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * free the request.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (!changed)
		return false;

	if (ctrl->state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
		nvme_kick_requeue_lists(ctrl);
	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
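
/*
 * Transitions accepted by nvme_change_ctrl_state() above, derived directly
 * from the nested switch (rows are old_state, 'x' marks an allowed change):
 *
 *                LIVE  RESETTING  CONNECTING  DELETING  DELETING_NOIO  DEAD
 *   NEW           x        x          x
 *   LIVE                   x                     x
 *   RESETTING     x                   x          x
 *   CONNECTING    x                              x
 *   DELETING                                                  x          x
 *   DEAD                                                      x
 */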

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_GPL(nvme_put_ns);

static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

static inline void nvme_init_request(struct request *req,
		struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
		req->timeout = NVME_ADMIN_TIMEOUT;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_HIPRI;
	nvme_clear_nvme_request(req);
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
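
/*
 * Typical use of nvme_alloc_request(), mirroring the keep-alive path later
 * in this file: allocate, optionally adjust the timeout, then execute.  A
 * minimal sketch (error handling trimmed; "my_end_io" is a stand-in for a
 * caller-provided completion such as nvme_keep_alive_end_io()):
 *
 *	struct request *rq;
 *
 *	rq = nvme_alloc_request(ctrl->admin_q, &cmd,
 *				BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->timeout = ctrl->kato * HZ;
 *	rq->end_io_data = ctrl;
 *	blk_execute_rq_nowait(rq->q, NULL, rq, 0, my_end_io);
 */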

static struct request *nvme_alloc_request_qid(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	struct request *req;

	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
			qid ? qid - 1 : 0);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}

/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered.  However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * currently we have a problem sending passthru commands
	 * on the admin_q if the controller is not LIVE because we can't
	 * make sure that they are going out after the admin connect,
	 * controller enable and/or other commands in the initialization
	 * sequence. until the controller will be LIVE, fail with
	 * BLK_STS_RESOURCE so that they will be rescheduled.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
		 */
		switch (ctrl->state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
				return true;
			break;
		default:
			break;
		case NVME_CTRL_DEAD:
			return false;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c = { };

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c = { };

	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		goto out_disable_stream;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
			 ctrl->nssa);
		goto out_disable_stream;
	}

	ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;

out_disable_stream:
	nvme_disable_streams(ctrl);
	return ret;
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail allocation our range, fallback to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = alloc_size;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	if (nvme_ns_has_pi(ns))
		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
	else
		cmnd->write_zeroes.control = 0;
	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = op;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides
		 * a metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.
		 * Else we enable the PRACT bit for protection information or
		 * set the namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
		struct page *page = req->special_vec.bv_page;

		if (page == ctrl->discard_page)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(page_address(page) + req->special_vec.bv_offset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
	struct nvme_command *cmd = nvme_req(req)->cmd;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_clear_nvme_request(req);
		memset(cmd, 0, sizeof(*cmd));
	}

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/* these are setup prior to execution in nvme_init_request() */
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = nvme_cid(req);
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Return values:
 * 0:  success
 * >0: nvme controller's cqe status response
 * <0: kernel error in lieu of controller response
 */
static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
		bool at_head)
{
	blk_status_t status;

	status = blk_execute_rq(rq->q, disk, rq, at_head);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags)
{
	struct request *req;
	int ret;

	if (qid == NVME_QID_ANY)
		req = nvme_alloc_request(q, cmd, flags);
	else
		req = nvme_alloc_request_qid(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (timeout)
		req->timeout = timeout;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	ret = nvme_execute_rq(NULL, req, at_head);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
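
/*
 * Most admin commands in this file go through the two wrappers above.  For
 * example, fetching the Identify Controller page (compare
 * nvme_identify_ctrl() below) is simply:
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id,
 *				     sizeof(struct nvme_id_ctrl));
 *
 * A negative return value is a Linux errno; a positive one is the NVMe
 * completion status.
 */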

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unhandled effects:%08x\n",
				opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_GPL(nvme_command_effects);

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
			      struct nvme_command *cmd, int status)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_ctrl_finish(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * Keep alive commands interval on the host should be
			 * updated when KATO is modified by Set Features
			 * commands.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}

int nvme_execute_passthru_rq(struct request *rq)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	u32 effects;
	int ret;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(disk, rq, false);
	if (effects)
		nvme_passthru_end(ctrl, effects, cmd, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_execute_passthru_rq);

/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 *   The host should send Keep Alive commands at half of the Keep Alive
 *   Timeout accounting for transport roundtrip times [..].
 */
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
			status);
		return;
	}

	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		nvme_queue_keep_alive_work(ctrl);
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;
	struct request *rq;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		nvme_queue_keep_alive_work(ctrl);
		return;
	}

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
				BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
		nvme_reset_ctrl(ctrl);
		return;
	}

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;
	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	nvme_queue_keep_alive_work(ctrl);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{
	unsigned int new_kato =
		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);

	dev_info(ctrl->device,
		 "keep alive interval updated from %u ms to %u ms\n",
		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);

	nvme_stop_keep_alive(ctrl);
	ctrl->kato = new_kato;
	nvme_start_keep_alive(ctrl);
}
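
/*
 * Unit bookkeeping for the update above: the KATO value carried in cdw11
 * of a Set Features command is in milliseconds, while ctrl->kato is kept
 * in seconds, hence the DIV_ROUND_UP(..., 1000).  A host-programmed KATO
 * of 15000 ms, for example, yields ctrl->kato = 15, and the keep-alive
 * work is then requeued every kato * HZ / 2 jiffies (7.5 seconds here).
 */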

/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had an off by one issue in its implementation of this,
 * so check for that as well.
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}
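
/*
 * NVME_VS() packs a specification version as
 * (major << 16) | (minor << 8) | tertiary, matching the layout of the VS
 * register, so the checks above are plain integer comparisons: e.g.
 * NVME_VS(1, 2, 0) == 0x10200, and any 1.0.x controller reports a smaller
 * value.
 */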

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur, bool *csi_seen)
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	bool csi_seen = false;
	int status, pos, len;
	void *data;

	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			nsid, status);
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
		if (len < 0)
			break;

		len += sizeof(*cur);
	}

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 nsid);
		status = -EINVAL;
	}

free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_ns_ids *ids, struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		goto out_free_id;
	}

	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
	if ((*id)->ncap == 0)
		goto out_free_id;

	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));

	return 0;

out_free_id:
	kfree(*id);
	return error;
}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	union nvme_result res = { 0 };
	struct nvme_command c = { };
	int ret;

	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
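
/*
 * Worked example for the encoding above: requesting 8 I/O queues sends
 * q_count = 7 | (7 << 16), since NSQR and NCQR are zero's based.  If the
 * controller grants, say, 4 submission and 6 completion queues, result is
 * 3 | (5 << 16), and min(3, 5) + 1 = 4 usable queue pairs are passed back
 * in *count.
 */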

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}

static int nvme_ns_open(struct nvme_ns *ns)
{
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
		goto fail;
	if (!nvme_get_ns(ns))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_ns_release(struct nvme_ns *ns)
{
	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_ns_open(bdev->bd_disk->private_data);
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	nvme_ns_release(disk->private_data);
}

int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
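
/*
 * The fake geometry above advertises 64 heads and 32 sectors per track,
 * so one cylinder spans 64 * 32 = 2048 = 2^11 sectors; shifting the
 * capacity right by 11 keeps heads * sectors * cylinders consistent with
 * the disk capacity for legacy HDIO_GETGEO users.
 */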

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
	struct blk_integrity integrity = { };

	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
}
#endif

static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (ctrl->max_discard_sectors == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
}

static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
				 u32 *phys_bs, u32 *io_opt)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		*phys_bs = ns->sws * (1 << ns->lba_shift);
		if (ns->sgs)
			*io_opt = *phys_bs * ns->sgs;
	}

	return 0;
}

static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	/*
	 * The PI implementation requires the metadata size to be equal to the
	 * t10 pi tuple size.
	 */
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		return 0;
	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * The NVMe over Fabrics specification only supports metadata as
		 * part of the extended data LBA.  We rely on HCA/HBA support to
		 * remap the separate metadata buffer from the block layer.
		 */
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
			return -EINVAL;
		if (ctrl->max_integrity_segments)
			ns->features |=
				(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	} else {
		/*
		 * For PCIe controllers, we can't easily remap the separate
		 * metadata buffer from the block layer and thus require a
		 * separate metadata buffer for block layer metadata/PI support.
		 * We allow extended LBAs for the passthrough interface, though.
		 */
		if (id->flbas & NVME_NS_FLBAS_META_EXT)
			ns->features |= NVME_NS_EXT_LBAS;
		else
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	}

	return 0;
}

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, 7);
	blk_queue_write_cache(q, vwc, vwc);
}

static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
	unsigned short bs = 1 << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt = 0;

	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */
	if (ns->lba_shift > PAGE_SHIFT) {
		capacity = 0;
		bs = (1 << 9);
	}

	blk_integrity_unregister(disk);

	atomic_bs = phys_bs = bs;
	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}

	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
		/* NPWG = Namespace Preferred Write Granularity */
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
		/* NOWS = Namespace Optimal Write Size */
		io_opt = bs * (1 + le16_to_cpu(id->nows));
	}

	blk_queue_logical_block_size(disk->queue, bs);
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);

	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
			nvme_init_integrity(disk, ns->ms, ns->pi_type,
					    ns->ctrl->max_integrity_segments);
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

	set_capacity_revalidate_and_notify(disk, capacity, true);

	nvme_config_discard(disk, ns);
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   ns->ctrl->max_zeroes_sectors);

	set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
		test_bit(NVME_NS_FORCE_RO, &ns->flags));
}

static inline bool nvme_first_scan(struct gendisk *disk)
{
	/* nvme_alloc_ns() scans the disk prior to adding it */
	return !(disk->flags & GENHD_FL_UP);
}

static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 iob;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		iob = ctrl->max_hw_sectors;
	else
		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

	if (!iob)
		return;

	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
				ns->disk->disk_name);
		return;
	}

	blk_queue_chunk_sectors(ns->queue, iob);
}

static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	int ret;

	blk_mq_freeze_queue(ns->disk->queue);
	ns->lba_shift = id->lbaf[lbaf].ds;
	nvme_set_queue_limits(ns->ctrl, ns->queue);

	ret = nvme_configure_metadata(ns, id);
	if (ret)
		goto out_unfreeze;
	nvme_set_chunk_sectors(ns, id);
	nvme_update_disk_info(ns->disk, ns, id);

	if (ns->head->ids.csi == NVME_CSI_ZNS) {
		ret = nvme_update_zone_info(ns, lbaf);
		if (ret)
			goto out_unfreeze;
	}

	blk_mq_unfreeze_queue(ns->disk->queue);

	if (blk_queue_is_zoned(ns->queue)) {
		ret = nvme_revalidate_zones(ns);
		if (ret && !nvme_first_scan(ns->disk))
			goto out;
	}

	if (nvme_ns_head_multipath(ns->head)) {
		blk_mq_freeze_queue(ns->head->disk->queue);
		nvme_update_disk_info(ns->head->disk, ns, id);
		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
		blk_queue_update_readahead(ns->head->disk->queue);
		blk_mq_unfreeze_queue(ns->head->disk->queue);
	}
	return 0;

out_unfreeze:
	blk_mq_unfreeze_queue(ns->disk->queue);
out:
	/*
	 * If probing fails due an unsupported feature, hide the block device,
	 * but still allow other access.
	 */
	if (ret == -ENODEV) {
		ns->disk->flags |= GENHD_FL_HIDDEN;
		ret = 0;
	}
	return ret;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, u8 data[16])
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		u8 data[16])
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_command c = { };
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);

	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
	    bdev->bd_disk->fops == &nvme_ns_head_ops)
		return nvme_send_ns_head_pr_command(bdev, &c, data);
	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};
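
/*
 * cdw10 layout used by the reservation helpers above, per the NVMe
 * Reservation Register/Acquire/Release commands: bits 2:0 carry the
 * action (Register: 0 = register, 2 = replace; Acquire: 0 = acquire,
 * 1 = preempt, 2 = preempt-and-abort; Release: 0 = release, 1 = clear),
 * bit 3 is "ignore existing key" (PR_FL_IGNORE_KEY), bits 15:8 carry the
 * reservation type from nvme_pr_type(), and bits 31:30 in the register
 * path request persist-through-power-loss.
 */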

#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd = { };

	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
			NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif

#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
			data);
}
#else
#define nvme_report_zones	NULL
#endif

static const struct block_device_operations nvme_bdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_report_zones,
	.pr_ops		= &nvme_pr_ops,
};

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		usleep_range(1000, 2000);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s, CSTS=0x%x\n",
				enabled ? "initialisation" : "reset", csts);
			return -ENODEV;
		}
	}

	return ret;
}
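
/*
 * Timeout arithmetic for nvme_wait_ready() above: CAP.TO is expressed in
 * units of 500 ms, so (NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2 converts it to
 * jiffies.  A controller advertising CAP.TO = 15, for instance, gets
 * (15 + 1) / 2 = 8 seconds to flip CSTS.RDY before we give up.
 */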

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
2181int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
2182{
2183 int ret;
2184
2185 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2186 ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2187
2188 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2189 if (ret)
2190 return ret;
2191
2192 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2193 msleep(NVME_QUIRK_DELAY_AMOUNT);
2194
2195 return nvme_wait_ready(ctrl, ctrl->cap, false);
2196}
2197EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2198
2199int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2200{
2201 unsigned dev_page_min;
2202 int ret;
2203
2204 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2205 if (ret) {
2206 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2207 return ret;
2208 }
2209 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2210
2211 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
2212 dev_err(ctrl->device,
2213 "Minimum device page size %u too large for host (%u)\n",
2214 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
2215 return -ENODEV;
2216 }
2217
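 /*
  * Prefer enabling all supported command sets (CC.CSS = CSI) when the
  * controller advertises them; otherwise fall back to the NVM command set.
  */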
2218 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2219 ctrl->ctrl_config = NVME_CC_CSS_CSI;
2220 else
2221 ctrl->ctrl_config = NVME_CC_CSS_NVM;
2222 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2223 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2224 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2225 ctrl->ctrl_config |= NVME_CC_ENABLE;
2226
2227 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2228 if (ret)
2229 return ret;
2230 return nvme_wait_ready(ctrl, ctrl->cap, true);
2231}
2232EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2233
2234int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
2235{
2236 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2237 u32 csts;
2238 int ret;
2239
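 /*
  * Request a normal shutdown via CC.SHN, then poll CSTS.SHST below until
  * the controller reports that shutdown processing has completed.
  */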
2240 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2241 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2242
2243 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2244 if (ret)
2245 return ret;
2246
2247 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2248 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
2249 break;
2250
2251 msleep(100);
2252 if (fatal_signal_pending(current))
2253 return -EINTR;
2254 if (time_after(jiffies, timeout)) {
2255 dev_err(ctrl->device,
2256 "Device shutdown incomplete; abort shutdown\n");
2257 return -ENODEV;
2258 }
2259 }
2260
2261 return ret;
2262}
2263EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2264
2265static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2266{
2267 __le64 ts;
2268 int ret;
2269
2270 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2271 return 0;
2272
2273 ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2274 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2275 NULL);
2276 if (ret)
2277 dev_warn_once(ctrl->device,
2278 "could not set timestamp (%d)\n", ret);
2279 return ret;
2280}
2281
2282static int nvme_configure_acre(struct nvme_ctrl *ctrl)
2283{
2284 struct nvme_feat_host_behavior *host;
2285 int ret;
2286
 /* Don't bother enabling the feature if retry delay is not reported */
2288 if (!ctrl->crdt[0])
2289 return 0;
2290
2291 host = kzalloc(sizeof(*host), GFP_KERNEL);
2292 if (!host)
2293 return 0;
2294
2295 host->acre = NVME_ENABLE_ACRE;
2296 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2297 host, sizeof(*host), NULL);
2298 kfree(host);
2299 return ret;
2300}
2301
/*
 * The function checks whether the given total (entry + exit) latency of a
 * power state allows the latter to be used as an APST transition target.
 * It does so by comparing the latency to the primary and secondary latency
 * tolerances defined by module params.  If there's a match, the corresponding
 * timeout value from the params is returned, and the matching tolerance index
 * (1 or 2) is recorded so that each tolerance is only used once.
 */
2310static bool nvme_apst_get_transition_time(u64 total_latency,
2311 u64 *transition_time, unsigned *last_index)
2312{
2313 if (total_latency <= apst_primary_latency_tol_us) {
2314 if (*last_index == 1)
2315 return false;
2316 *last_index = 1;
2317 *transition_time = apst_primary_timeout_ms;
2318 return true;
2319 }
2320 if (apst_secondary_timeout_ms &&
2321 total_latency <= apst_secondary_latency_tol_us) {
2322 if (*last_index <= 2)
2323 return false;
2324 *last_index = 2;
2325 *transition_time = apst_secondary_timeout_ms;
2326 return true;
2327 }
2328 return false;
2329}
2330
/*
 * APST (Autonomous Power State Transition) lets us program a table of power
 * state transitions that the controller will perform automatically.
 *
 * Depending on module params, one of the two supported techniques will be
 * used:
 *
 * - If the parameters provide explicit timeouts and tolerances, they will be
 *   used to build a table with up to 2 non-operational states to transition
 *   to.  Since we don't implement dynamic regeneration of the APST table when
 *   switching between external and battery power, the timeouts and tolerances
 *   reflect a compromise between the values used for AC and battery
 *   scenarios.
 * - If not, we'll configure the table with a simple heuristic: we are willing
 *   to spend at most 2% of the time transitioning between power states.
 *   Therefore, when running in any given state, we will enter the next
 *   lower-power non-operational state after waiting 50 * (enlat + exlat)
 *   microseconds, as long as that state's exit latency is under the requested
 *   maximum latency.
 *
 * We will not autonomously enter any non-operational state for which the
 * total latency exceeds ps_max_latency_us.
 *
 * Users can set ps_max_latency_us to zero to turn off APST.
 */
2356static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2357{
2358 struct nvme_feat_auto_pst *table;
2359 unsigned apste = 0;
2360 u64 max_lat_us = 0;
2361 __le64 target = 0;
2362 int max_ps = -1;
2363 int state;
2364 int ret;
2365 unsigned last_lt_index = UINT_MAX;
2366
 /*
  * If APST isn't supported or if we haven't been initialized yet,
  * then don't do anything.
  */
2371 if (!ctrl->apsta)
2372 return 0;
2373
2374 if (ctrl->npss > 31) {
2375 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2376 return 0;
2377 }
2378
2379 table = kzalloc(sizeof(*table), GFP_KERNEL);
2380 if (!table)
2381 return 0;
2382
2383 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
 /* Turn off APST. */
2385 dev_dbg(ctrl->device, "APST disabled\n");
2386 goto done;
2387 }
2388
 /*
  * Walk through all states from lowest- to highest-power.
  * According to the spec, lower-numbered states use more power.  NPSS,
  * despite the name, is the index of the lowest-power state, not the
  * number of states.
  */
2395 for (state = (int)ctrl->npss; state >= 0; state--) {
2396 u64 total_latency_us, exit_latency_us, transition_ms;
2397
2398 if (target)
2399 table->entries[state] = target;

 /*
  * Don't allow transitions to the deepest state if it's quirked off.
  */
2405 if (state == ctrl->npss &&
2406 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2407 continue;
2408
 /*
  * Is this state a useful non-operational state?  If so, check its
  * latencies and consider it as a transition target below.
  */
2413 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2414 continue;
2415
2416 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2417 if (exit_latency_us > ctrl->ps_max_latency_us)
2418 continue;
2419
2420 total_latency_us = exit_latency_us +
2421 le32_to_cpu(ctrl->psd[state].entry_lat);
2422
 /*
  * This state is good.  It can be used as the APST idle target
  * for higher power states.
  */
2427 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
2428 if (!nvme_apst_get_transition_time(total_latency_us,
2429 &transition_ms, &last_lt_index))
2430 continue;
2431 } else {
2432 transition_ms = total_latency_us + 19;
2433 do_div(transition_ms, 20);
2434 if (transition_ms > (1 << 24) - 1)
2435 transition_ms = (1 << 24) - 1;
2436 }
2437
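 /*
  * APST entry format per the spec: the target power state in bits 7:3
  * and the idle time prior to transition, in milliseconds, in bits 31:8.
  */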
2438 target = cpu_to_le64((state << 3) | (transition_ms << 8));
2439 if (max_ps == -1)
2440 max_ps = state;
2441 if (total_latency_us > max_lat_us)
2442 max_lat_us = total_latency_us;
2443 }
2444
2445 if (max_ps == -1)
2446 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2447 else
2448 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2449 max_ps, max_lat_us, (int)sizeof(*table), table);
2450 apste = 1;
2451
2452done:
2453 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2454 table, sizeof(*table), NULL);
2455 if (ret)
2456 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2457 kfree(table);
2458 return ret;
2459}
2460
2461static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2462{
2463 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2464 u64 latency;
2465
2466 switch (val) {
2467 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2468 case PM_QOS_LATENCY_ANY:
2469 latency = U64_MAX;
2470 break;
2471
2472 default:
2473 latency = val;
2474 }
2475
2476 if (ctrl->ps_max_latency_us != latency) {
2477 ctrl->ps_max_latency_us = latency;
2478 if (ctrl->state == NVME_CTRL_LIVE)
2479 nvme_configure_apst(ctrl);
2480 }
2481}
2482
2483struct nvme_core_quirk_entry {
 /*
  * NVMe model and firmware strings are padded with spaces.  For
  * simplicity, strings in the quirk table are padded with NULLs
  * instead.
  */
2489 u16 vid;
2490 const char *mn;
2491 const char *fr;
2492 unsigned long quirks;
2493};
2494
2495static const struct nvme_core_quirk_entry core_quirks[] = {
2496 {
 /*
  * This Toshiba device seems to die using any APST states; quirk
  * APST off for it.
  */
2501 .vid = 0x1179,
2502 .mn = "THNSF5256GPUK TOSHIBA",
2503 .quirks = NVME_QUIRK_NO_APST,
2504 },
2505 {
 /*
  * This LiteON firmware version has a race condition associated
  * with actions related to suspend to idle; LiteON has resolved
  * the problem in future firmware.
  */
2511 .vid = 0x14a4,
2512 .fr = "22301111",
2513 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2514 }
2515};
2516
/* match is null-terminated but idstr is space-padded. */
2518static bool string_matches(const char *idstr, const char *match, size_t len)
2519{
2520 size_t matchlen;
2521
2522 if (!match)
2523 return true;
2524
2525 matchlen = strlen(match);
2526 WARN_ON_ONCE(matchlen > len);
2527
2528 if (memcmp(idstr, match, matchlen))
2529 return false;
2530
2531 for (; matchlen < len; matchlen++)
2532 if (idstr[matchlen] != ' ')
2533 return false;
2534
2535 return true;
2536}
2537
2538static bool quirk_matches(const struct nvme_id_ctrl *id,
2539 const struct nvme_core_quirk_entry *q)
2540{
2541 return q->vid == le16_to_cpu(id->vid) &&
2542 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2543 string_matches(id->fr, q->fr, sizeof(id->fr));
2544}
2545
2546static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2547 struct nvme_id_ctrl *id)
2548{
2549 size_t nqnlen;
2550 int off;
2551
 if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2553 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2554 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2555 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2556 return;
2557 }
2558
2559 if (ctrl->vs >= NVME_VS(1, 2, 1))
2560 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2561 }
2562
 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001. */
2564 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2565 "nqn.2014.08.org.nvmexpress:%04x%04x",
2566 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2567 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2568 off += sizeof(id->sn);
2569 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2570 off += sizeof(id->mn);
2571 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2572}
2573
2574static void nvme_release_subsystem(struct device *dev)
2575{
2576 struct nvme_subsystem *subsys =
2577 container_of(dev, struct nvme_subsystem, dev);
2578
2579 if (subsys->instance >= 0)
2580 ida_simple_remove(&nvme_instance_ida, subsys->instance);
2581 kfree(subsys);
2582}
2583
2584static void nvme_destroy_subsystem(struct kref *ref)
2585{
2586 struct nvme_subsystem *subsys =
2587 container_of(ref, struct nvme_subsystem, ref);
2588
2589 mutex_lock(&nvme_subsystems_lock);
2590 list_del(&subsys->entry);
2591 mutex_unlock(&nvme_subsystems_lock);
2592
2593 ida_destroy(&subsys->ns_ida);
2594 device_del(&subsys->dev);
2595 put_device(&subsys->dev);
2596}
2597
2598static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2599{
2600 kref_put(&subsys->ref, nvme_destroy_subsystem);
2601}
2602
2603static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2604{
2605 struct nvme_subsystem *subsys;
2606
2607 lockdep_assert_held(&nvme_subsystems_lock);
2608
 /*
  * Fail matches for discovery subsystems.  This results
  * in each discovery controller bound to a unique subsystem.
  * This avoids issues with validating controller values
  * that can only be true when there is a single unique subsystem.
  * There may be multiple and completely independent entities
  * that provide discovery controllers.
  */
2617 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2618 return NULL;
2630
2631 list_for_each_entry(subsys, &nvme_subsystems, entry) {
2632 if (strcmp(subsys->subnqn, subsysnqn))
2633 continue;
2634 if (!kref_get_unless_zero(&subsys->ref))
2635 continue;
2636 return subsys;
2637 }
2638
2639 return NULL;
2640}
2641
2642#define SUBSYS_ATTR_RO(_name, _mode, _show) \
2643 struct device_attribute subsys_attr_##_name = \
2644 __ATTR(_name, _mode, _show, NULL)
2645
2646static ssize_t nvme_subsys_show_nqn(struct device *dev,
2647 struct device_attribute *attr,
2648 char *buf)
2649{
2650 struct nvme_subsystem *subsys =
2651 container_of(dev, struct nvme_subsystem, dev);
2652
2653 return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
2654}
2655static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2656
2657#define nvme_subsys_show_str_function(field) \
2658static ssize_t subsys_##field##_show(struct device *dev, \
2659 struct device_attribute *attr, char *buf) \
2660{ \
2661 struct nvme_subsystem *subsys = \
2662 container_of(dev, struct nvme_subsystem, dev); \
2663 return sysfs_emit(buf, "%.*s\n", \
2664 (int)sizeof(subsys->field), subsys->field); \
2665} \
2666static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2667
2668nvme_subsys_show_str_function(model);
2669nvme_subsys_show_str_function(serial);
2670nvme_subsys_show_str_function(firmware_rev);
2671
2672static struct attribute *nvme_subsys_attrs[] = {
2673 &subsys_attr_model.attr,
2674 &subsys_attr_serial.attr,
2675 &subsys_attr_firmware_rev.attr,
2676 &subsys_attr_subsysnqn.attr,
2677#ifdef CONFIG_NVME_MULTIPATH
2678 &subsys_attr_iopolicy.attr,
2679#endif
2680 NULL,
2681};
2682
2683static const struct attribute_group nvme_subsys_attrs_group = {
2684 .attrs = nvme_subsys_attrs,
2685};
2686
2687static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2688 &nvme_subsys_attrs_group,
2689 NULL,
2690};
2691
2692static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
2693{
2694 return ctrl->opts && ctrl->opts->discovery_nqn;
2695}
2696
2697static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2698 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2699{
2700 struct nvme_ctrl *tmp;
2701
2702 lockdep_assert_held(&nvme_subsystems_lock);
2703
2704 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2705 if (nvme_state_terminal(tmp))
2706 continue;
2707
2708 if (tmp->cntlid == ctrl->cntlid) {
2709 dev_err(ctrl->device,
2710 "Duplicate cntlid %u with %s, rejecting\n",
2711 ctrl->cntlid, dev_name(tmp->device));
2712 return false;
2713 }
2714
2715 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2716 nvme_discovery_ctrl(ctrl))
2717 continue;
2718
2719 dev_err(ctrl->device,
2720 "Subsystem does not support multiple controllers\n");
2721 return false;
2722 }
2723
2724 return true;
2725}
2726
2727static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2728{
2729 struct nvme_subsystem *subsys, *found;
2730 int ret;
2731
2732 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2733 if (!subsys)
2734 return -ENOMEM;
2735
2736 subsys->instance = -1;
2737 mutex_init(&subsys->lock);
2738 kref_init(&subsys->ref);
2739 INIT_LIST_HEAD(&subsys->ctrls);
2740 INIT_LIST_HEAD(&subsys->nsheads);
2741 nvme_init_subnqn(subsys, ctrl, id);
2742 memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2743 memcpy(subsys->model, id->mn, sizeof(subsys->model));
2744 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2745 subsys->vendor_id = le16_to_cpu(id->vid);
2746 subsys->cmic = id->cmic;
2747 subsys->awupf = le16_to_cpu(id->awupf);
2748#ifdef CONFIG_NVME_MULTIPATH
2749 subsys->iopolicy = NVME_IOPOLICY_NUMA;
2750#endif
2751
2752 subsys->dev.class = nvme_subsys_class;
2753 subsys->dev.release = nvme_release_subsystem;
2754 subsys->dev.groups = nvme_subsys_attrs_groups;
2755 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2756 device_initialize(&subsys->dev);
2757
2758 mutex_lock(&nvme_subsystems_lock);
2759 found = __nvme_find_get_subsystem(subsys->subnqn);
2760 if (found) {
2761 put_device(&subsys->dev);
2762 subsys = found;
2763
2764 if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2765 ret = -EINVAL;
2766 goto out_put_subsystem;
2767 }
2768 } else {
2769 ret = device_add(&subsys->dev);
2770 if (ret) {
2771 dev_err(ctrl->device,
2772 "failed to register subsystem device.\n");
2773 put_device(&subsys->dev);
2774 goto out_unlock;
2775 }
2776 ida_init(&subsys->ns_ida);
2777 list_add_tail(&subsys->entry, &nvme_subsystems);
2778 }
2779
2780 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2781 dev_name(ctrl->device));
2782 if (ret) {
2783 dev_err(ctrl->device,
2784 "failed to create sysfs link from subsystem.\n");
2785 goto out_put_subsystem;
2786 }
2787
2788 if (!found)
2789 subsys->instance = ctrl->instance;
2790 ctrl->subsys = subsys;
2791 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2792 mutex_unlock(&nvme_subsystems_lock);
2793 return 0;
2794
2795out_put_subsystem:
2796 nvme_put_subsystem(subsys);
2797out_unlock:
2798 mutex_unlock(&nvme_subsystems_lock);
2799 return ret;
2800}
2801
2802int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
2803 void *log, size_t size, u64 offset)
2804{
2805 struct nvme_command c = { };
2806 u32 dwlen = nvme_bytes_to_numd(size);
2807
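 /*
  * The transfer length is a zero's based dword count (NUMD), split into
  * 16-bit lower and upper halves (NUMDL/NUMDU) below.
  */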
2808 c.get_log_page.opcode = nvme_admin_get_log_page;
2809 c.get_log_page.nsid = cpu_to_le32(nsid);
2810 c.get_log_page.lid = log_page;
2811 c.get_log_page.lsp = lsp;
2812 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2813 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2814 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2815 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2816 c.get_log_page.csi = csi;
2817
2818 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2819}
2820
2821static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
2822 struct nvme_effects_log **log)
2823{
2824 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
2825 int ret;
2826
2827 if (cel)
2828 goto out;
2829
2830 cel = kzalloc(sizeof(*cel), GFP_KERNEL);
2831 if (!cel)
2832 return -ENOMEM;
2833
2834 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
2835 cel, sizeof(*cel), 0);
2836 if (ret) {
2837 kfree(cel);
2838 return ret;
2839 }
2840
2841 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
2842out:
2843 *log = cel;
2844 return 0;
2845}
2846
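/*
 * Convert a power-of-two exponent, expressed in units of the controller's
 * minimum memory page size (CAP.MPSMIN), into 512-byte sectors, saturating
 * at UINT_MAX on overflow.
 */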
2847static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
2848{
2849 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
2850
2851 if (check_shl_overflow(1U, units + page_shift - 9, &val))
2852 return UINT_MAX;
2853 return val;
2854}
2855
2856static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
2857{
2858 struct nvme_command c = { };
2859 struct nvme_id_ctrl_nvm *id;
2860 int ret;
2861
2862 if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
2863 ctrl->max_discard_sectors = UINT_MAX;
2864 ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
2865 } else {
2866 ctrl->max_discard_sectors = 0;
2867 ctrl->max_discard_segments = 0;
2868 }
2869
2870
2871
2872
2873
2874
2875
2876 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
2877 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
2878 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
2879 else
2880 ctrl->max_zeroes_sectors = 0;
2881
2882 if (nvme_ctrl_limited_cns(ctrl))
2883 return 0;
2884
2885 id = kzalloc(sizeof(*id), GFP_KERNEL);
2886 if (!id)
2887 return 0;
2888
2889 c.identify.opcode = nvme_admin_identify;
2890 c.identify.cns = NVME_ID_CNS_CS_CTRL;
2891 c.identify.csi = NVME_CSI_NVM;
2892
2893 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
2894 if (ret)
2895 goto free_data;
2896
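 /*
  * Prefer the limits the controller reports in the NVM command set
  * Identify data: DMRL/DMRSL for discard, WZSL for Write Zeroes.
  */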
2897 if (id->dmrl)
2898 ctrl->max_discard_segments = id->dmrl;
2899 if (id->dmrsl)
2900 ctrl->max_discard_sectors = le32_to_cpu(id->dmrsl);
2901 if (id->wzsl)
2902 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
2903
2904free_data:
2905 kfree(id);
2906 return ret;
2907}
2908
2909static int nvme_init_identify(struct nvme_ctrl *ctrl)
2910{
2911 struct nvme_id_ctrl *id;
2912 u32 max_hw_sectors;
2913 bool prev_apst_enabled;
2914 int ret;
2915
2916 ret = nvme_identify_ctrl(ctrl, &id);
2917 if (ret) {
2918 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2919 return -EIO;
2920 }
2921
2922 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2923 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
2924 if (ret < 0)
2925 goto out_free;
2926 }
2927
2928 if (!(ctrl->ops->flags & NVME_F_FABRICS))
2929 ctrl->cntlid = le16_to_cpu(id->cntlid);
2930
2931 if (!ctrl->identified) {
2932 unsigned int i;
2933
2934 ret = nvme_init_subsystem(ctrl, id);
2935 if (ret)
2936 goto out_free;
2937
 /*
  * Check for quirks.  A quirk can depend on the firmware version,
  * so, in principle, the set of quirks can change across a reset.
  * For now the table is only consulted the first time a controller
  * is identified.
  */
2946 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2947 if (quirk_matches(id, &core_quirks[i]))
2948 ctrl->quirks |= core_quirks[i].quirks;
2949 }
2950 }
2951
2952 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2953 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2954 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2955 }
2956
2957 ctrl->crdt[0] = le16_to_cpu(id->crdt1);
2958 ctrl->crdt[1] = le16_to_cpu(id->crdt2);
2959 ctrl->crdt[2] = le16_to_cpu(id->crdt3);
2960
2961 ctrl->oacs = le16_to_cpu(id->oacs);
2962 ctrl->oncs = le16_to_cpu(id->oncs);
2963 ctrl->mtfa = le16_to_cpu(id->mtfa);
2964 ctrl->oaes = le32_to_cpu(id->oaes);
2965 ctrl->wctemp = le16_to_cpu(id->wctemp);
2966 ctrl->cctemp = le16_to_cpu(id->cctemp);
2967
2968 atomic_set(&ctrl->abort_limit, id->acl + 1);
2969 ctrl->vwc = id->vwc;
2970 if (id->mdts)
2971 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
2972 else
2973 max_hw_sectors = UINT_MAX;
2974 ctrl->max_hw_sectors =
2975 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2976
2977 nvme_set_queue_limits(ctrl, ctrl->admin_q);
2978 ctrl->sgls = le32_to_cpu(id->sgls);
2979 ctrl->kas = le16_to_cpu(id->kas);
2980 ctrl->max_namespaces = le32_to_cpu(id->mnan);
2981 ctrl->ctratt = le32_to_cpu(id->ctratt);
2982
2983 if (id->rtd3e) {
 /* us -> s */
2985 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
2986
2987 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
2988 shutdown_timeout, 60);
2989
2990 if (ctrl->shutdown_timeout != shutdown_timeout)
2991 dev_info(ctrl->device,
2992 "Shutdown timeout set to %u seconds\n",
2993 ctrl->shutdown_timeout);
2994 } else
2995 ctrl->shutdown_timeout = shutdown_timeout;
2996
2997 ctrl->npss = id->npss;
2998 ctrl->apsta = id->apsta;
2999 prev_apst_enabled = ctrl->apst_enabled;
3000 if (ctrl->quirks & NVME_QUIRK_NO_APST) {
3001 if (force_apst && id->apsta) {
3002 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3003 ctrl->apst_enabled = true;
3004 } else {
3005 ctrl->apst_enabled = false;
3006 }
3007 } else {
3008 ctrl->apst_enabled = id->apsta;
3009 }
3010 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
3011
3012 if (ctrl->ops->flags & NVME_F_FABRICS) {
3013 ctrl->icdoff = le16_to_cpu(id->icdoff);
3014 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
3015 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
3016 ctrl->maxcmd = le16_to_cpu(id->maxcmd);

 /*
  * In fabrics we need to verify the cntlid matches the
  * admin connect.
  */
3022 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3023 dev_err(ctrl->device,
3024 "Mismatching cntlid: Connect %u vs Identify "
3025 "%u, rejecting\n",
3026 ctrl->cntlid, le16_to_cpu(id->cntlid));
3027 ret = -EINVAL;
3028 goto out_free;
3029 }
3030
3031 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
3032 dev_err(ctrl->device,
3033 "keep-alive support is mandatory for fabrics\n");
3034 ret = -EINVAL;
3035 goto out_free;
3036 }
3037 } else {
3038 ctrl->hmpre = le32_to_cpu(id->hmpre);
3039 ctrl->hmmin = le32_to_cpu(id->hmmin);
3040 ctrl->hmminds = le32_to_cpu(id->hmminds);
3041 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3042 }
3043
3044 ret = nvme_mpath_init_identify(ctrl, id);
3045 if (ret < 0)
3046 goto out_free;
3047
3048 if (ctrl->apst_enabled && !prev_apst_enabled)
3049 dev_pm_qos_expose_latency_tolerance(ctrl->device);
3050 else if (!ctrl->apst_enabled && prev_apst_enabled)
3051 dev_pm_qos_hide_latency_tolerance(ctrl->device);
3052
3053out_free:
3054 kfree(id);
3055 return ret;
3056}
3057
/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
3063int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
3064{
3065 int ret;
3066
3067 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
3068 if (ret) {
3069 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3070 return ret;
3071 }
3072
3073 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3074
3075 if (ctrl->vs >= NVME_VS(1, 1, 0))
3076 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3077
3078 ret = nvme_init_identify(ctrl);
3079 if (ret)
3080 return ret;
3081
3082 ret = nvme_init_non_mdts_limits(ctrl);
3083 if (ret < 0)
3084 return ret;
3085
3086 ret = nvme_configure_apst(ctrl);
3087 if (ret < 0)
3088 return ret;
3089
3090 ret = nvme_configure_timestamp(ctrl);
3091 if (ret < 0)
3092 return ret;
3093
3094 ret = nvme_configure_directives(ctrl);
3095 if (ret < 0)
3096 return ret;
3097
3098 ret = nvme_configure_acre(ctrl);
3099 if (ret < 0)
3100 return ret;
3101
3102 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
3103 ret = nvme_hwmon_init(ctrl);
3104 if (ret < 0)
3105 return ret;
3106 }
3107
3108 ctrl->identified = true;
3109
3110 return 0;
3111}
3112EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
3113
3114static int nvme_dev_open(struct inode *inode, struct file *file)
3115{
3116 struct nvme_ctrl *ctrl =
3117 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3118
3119 switch (ctrl->state) {
3120 case NVME_CTRL_LIVE:
3121 break;
3122 default:
3123 return -EWOULDBLOCK;
3124 }
3125
3126 nvme_get_ctrl(ctrl);
3127 if (!try_module_get(ctrl->ops->module)) {
3128 nvme_put_ctrl(ctrl);
3129 return -EINVAL;
3130 }
3131
3132 file->private_data = ctrl;
3133 return 0;
3134}
3135
3136static int nvme_dev_release(struct inode *inode, struct file *file)
3137{
3138 struct nvme_ctrl *ctrl =
3139 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3140
3141 module_put(ctrl->ops->module);
3142 nvme_put_ctrl(ctrl);
3143 return 0;
3144}
3145
3146static const struct file_operations nvme_dev_fops = {
3147 .owner = THIS_MODULE,
3148 .open = nvme_dev_open,
3149 .release = nvme_dev_release,
3150 .unlocked_ioctl = nvme_dev_ioctl,
3151 .compat_ioctl = compat_ptr_ioctl,
3152};
3153
3154static ssize_t nvme_sysfs_reset(struct device *dev,
3155 struct device_attribute *attr, const char *buf,
3156 size_t count)
3157{
3158 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3159 int ret;
3160
3161 ret = nvme_reset_ctrl_sync(ctrl);
3162 if (ret < 0)
3163 return ret;
3164 return count;
3165}
3166static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3167
3168static ssize_t nvme_sysfs_rescan(struct device *dev,
3169 struct device_attribute *attr, const char *buf,
3170 size_t count)
3171{
3172 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3173
3174 nvme_queue_scan(ctrl);
3175 return count;
3176}
3177static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
3178
3179static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
3180{
3181 struct gendisk *disk = dev_to_disk(dev);
3182
3183 if (disk->fops == &nvme_bdev_ops)
3184 return nvme_get_ns_from_dev(dev)->head;
3185 else
3186 return disk->private_data;
3187}
3188
3189static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3190 char *buf)
3191{
3192 struct nvme_ns_head *head = dev_to_ns_head(dev);
3193 struct nvme_ns_ids *ids = &head->ids;
3194 struct nvme_subsystem *subsys = head->subsys;
3195 int serial_len = sizeof(subsys->serial);
3196 int model_len = sizeof(subsys->model);
3197
3198 if (!uuid_is_null(&ids->uuid))
3199 return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
3200
3201 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3202 return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
3203
3204 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3205 return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
3206
3207 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
3208 subsys->serial[serial_len - 1] == '\0'))
3209 serial_len--;
3210 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
3211 subsys->model[model_len - 1] == '\0'))
3212 model_len--;
3213
3214 return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
3215 serial_len, subsys->serial, model_len, subsys->model,
3216 head->ns_id);
3217}
3218static DEVICE_ATTR_RO(wwid);
3219
3220static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3221 char *buf)
3222{
3223 return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3224}
3225static DEVICE_ATTR_RO(nguid);
3226
3227static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3228 char *buf)
3229{
3230 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3231
 /*
  * For backward compatibility expose the NGUID to userspace if
  * we have no UUID set.
  */
3235 if (uuid_is_null(&ids->uuid)) {
3236 printk_ratelimited(KERN_WARNING
3237 "No UUID available providing old NGUID\n");
3238 return sysfs_emit(buf, "%pU\n", ids->nguid);
3239 }
3240 return sysfs_emit(buf, "%pU\n", &ids->uuid);
3241}
3242static DEVICE_ATTR_RO(uuid);
3243
3244static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3245 char *buf)
3246{
3247 return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3248}
3249static DEVICE_ATTR_RO(eui);
3250
3251static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3252 char *buf)
3253{
3254 return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3255}
3256static DEVICE_ATTR_RO(nsid);
3257
3258static struct attribute *nvme_ns_id_attrs[] = {
3259 &dev_attr_wwid.attr,
3260 &dev_attr_uuid.attr,
3261 &dev_attr_nguid.attr,
3262 &dev_attr_eui.attr,
3263 &dev_attr_nsid.attr,
3264#ifdef CONFIG_NVME_MULTIPATH
3265 &dev_attr_ana_grpid.attr,
3266 &dev_attr_ana_state.attr,
3267#endif
3268 NULL,
3269};
3270
3271static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3272 struct attribute *a, int n)
3273{
3274 struct device *dev = container_of(kobj, struct device, kobj);
3275 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3276
3277 if (a == &dev_attr_uuid.attr) {
3278 if (uuid_is_null(&ids->uuid) &&
3279 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3280 return 0;
3281 }
3282 if (a == &dev_attr_nguid.attr) {
3283 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3284 return 0;
3285 }
3286 if (a == &dev_attr_eui.attr) {
3287 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3288 return 0;
3289 }
3290#ifdef CONFIG_NVME_MULTIPATH
3291 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
3292 if (dev_to_disk(dev)->fops != &nvme_bdev_ops)
3293 return 0;
3294 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
3295 return 0;
3296 }
3297#endif
3298 return a->mode;
3299}
3300
3301static const struct attribute_group nvme_ns_id_attr_group = {
3302 .attrs = nvme_ns_id_attrs,
3303 .is_visible = nvme_ns_id_attrs_are_visible,
3304};
3305
3306const struct attribute_group *nvme_ns_id_attr_groups[] = {
3307 &nvme_ns_id_attr_group,
3308#ifdef CONFIG_NVM
3309 &nvme_nvm_attr_group,
3310#endif
3311 NULL,
3312};
3313
3314#define nvme_show_str_function(field) \
3315static ssize_t field##_show(struct device *dev, \
3316 struct device_attribute *attr, char *buf) \
3317{ \
3318 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3319 return sysfs_emit(buf, "%.*s\n", \
3320 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
3321} \
3322static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3323
3324nvme_show_str_function(model);
3325nvme_show_str_function(serial);
3326nvme_show_str_function(firmware_rev);
3327
3328#define nvme_show_int_function(field) \
3329static ssize_t field##_show(struct device *dev, \
3330 struct device_attribute *attr, char *buf) \
3331{ \
3332 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3333 return sysfs_emit(buf, "%d\n", ctrl->field); \
3334} \
3335static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3336
3337nvme_show_int_function(cntlid);
3338nvme_show_int_function(numa_node);
3339nvme_show_int_function(queue_count);
3340nvme_show_int_function(sqsize);
3341nvme_show_int_function(kato);
3342
3343static ssize_t nvme_sysfs_delete(struct device *dev,
3344 struct device_attribute *attr, const char *buf,
3345 size_t count)
3346{
3347 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3348
3349 if (device_remove_file_self(dev, attr))
3350 nvme_delete_ctrl_sync(ctrl);
3351 return count;
3352}
3353static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
3354
3355static ssize_t nvme_sysfs_show_transport(struct device *dev,
3356 struct device_attribute *attr,
3357 char *buf)
3358{
3359 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3360
3361 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
3362}
3363static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
3364
3365static ssize_t nvme_sysfs_show_state(struct device *dev,
3366 struct device_attribute *attr,
3367 char *buf)
3368{
3369 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3370 static const char *const state_name[] = {
3371 [NVME_CTRL_NEW] = "new",
3372 [NVME_CTRL_LIVE] = "live",
3373 [NVME_CTRL_RESETTING] = "resetting",
3374 [NVME_CTRL_CONNECTING] = "connecting",
3375 [NVME_CTRL_DELETING] = "deleting",
3376 [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
3377 [NVME_CTRL_DEAD] = "dead",
3378 };
3379
3380 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
3381 state_name[ctrl->state])
3382 return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
3383
3384 return sysfs_emit(buf, "unknown state\n");
3385}
3386
3387static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
3388
3389static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
3390 struct device_attribute *attr,
3391 char *buf)
3392{
3393 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3394
3395 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
3396}
3397static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
3398
3399static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
3400 struct device_attribute *attr,
3401 char *buf)
3402{
3403 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3404
3405 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
3406}
3407static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
3408
3409static ssize_t nvme_sysfs_show_hostid(struct device *dev,
3410 struct device_attribute *attr,
3411 char *buf)
3412{
3413 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3414
3415 return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
3416}
3417static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
3418
3419static ssize_t nvme_sysfs_show_address(struct device *dev,
3420 struct device_attribute *attr,
3421 char *buf)
3422{
3423 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3424
3425 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
3426}
3427static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
3428
3429static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
3430 struct device_attribute *attr, char *buf)
3431{
3432 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3433 struct nvmf_ctrl_options *opts = ctrl->opts;
3434
 if (opts->max_reconnects == -1)
3436 return sysfs_emit(buf, "off\n");
3437 return sysfs_emit(buf, "%d\n",
3438 opts->max_reconnects * opts->reconnect_delay);
3439}
3440
3441static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
3442 struct device_attribute *attr, const char *buf, size_t count)
3443{
3444 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3445 struct nvmf_ctrl_options *opts = ctrl->opts;
3446 int ctrl_loss_tmo, err;
3447
3448 err = kstrtoint(buf, 10, &ctrl_loss_tmo);
3449 if (err)
3450 return -EINVAL;
3451
3452 if (ctrl_loss_tmo < 0)
3453 opts->max_reconnects = -1;
3454 else
3455 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
3456 opts->reconnect_delay);
3457 return count;
3458}
3459static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
3460 nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
3461
3462static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
3463 struct device_attribute *attr, char *buf)
3464{
3465 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3466
3467 if (ctrl->opts->reconnect_delay == -1)
3468 return sysfs_emit(buf, "off\n");
3469 return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
3470}
3471
3472static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
3473 struct device_attribute *attr, const char *buf, size_t count)
3474{
3475 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3476 unsigned int v;
3477 int err;
3478
3479 err = kstrtou32(buf, 10, &v);
3480 if (err)
3481 return err;
3482
3483 ctrl->opts->reconnect_delay = v;
3484 return count;
3485}
3486static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
3487 nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
3488
3489static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
3490 struct device_attribute *attr, char *buf)
3491{
3492 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3493
3494 if (ctrl->opts->fast_io_fail_tmo == -1)
3495 return sysfs_emit(buf, "off\n");
3496 return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
3497}
3498
3499static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
3500 struct device_attribute *attr, const char *buf, size_t count)
3501{
3502 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3503 struct nvmf_ctrl_options *opts = ctrl->opts;
3504 int fast_io_fail_tmo, err;
3505
3506 err = kstrtoint(buf, 10, &fast_io_fail_tmo);
3507 if (err)
3508 return -EINVAL;
3509
3510 if (fast_io_fail_tmo < 0)
3511 opts->fast_io_fail_tmo = -1;
3512 else
3513 opts->fast_io_fail_tmo = fast_io_fail_tmo;
3514 return count;
3515}
3516static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
3517 nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
3518
3519static struct attribute *nvme_dev_attrs[] = {
3520 &dev_attr_reset_controller.attr,
3521 &dev_attr_rescan_controller.attr,
3522 &dev_attr_model.attr,
3523 &dev_attr_serial.attr,
3524 &dev_attr_firmware_rev.attr,
3525 &dev_attr_cntlid.attr,
3526 &dev_attr_delete_controller.attr,
3527 &dev_attr_transport.attr,
3528 &dev_attr_subsysnqn.attr,
3529 &dev_attr_address.attr,
3530 &dev_attr_state.attr,
3531 &dev_attr_numa_node.attr,
3532 &dev_attr_queue_count.attr,
3533 &dev_attr_sqsize.attr,
3534 &dev_attr_hostnqn.attr,
3535 &dev_attr_hostid.attr,
3536 &dev_attr_ctrl_loss_tmo.attr,
3537 &dev_attr_kato.attr,
3538 &dev_attr_reconnect_delay.attr,
3539 &dev_attr_fast_io_fail_tmo.attr,
3540 NULL
3541};
3542
3543static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
3544 struct attribute *a, int n)
3545{
3546 struct device *dev = container_of(kobj, struct device, kobj);
3547 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3548
3549 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
3550 return 0;
3551 if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
3552 return 0;
3553 if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
3554 return 0;
3555 if (a == &dev_attr_hostid.attr && !ctrl->opts)
3556 return 0;
3557 if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
3558 return 0;
3559 if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
3560 return 0;
3561 if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
3562 return 0;
3565
3566 return a->mode;
3567}
3568
3569static const struct attribute_group nvme_dev_attrs_group = {
3570 .attrs = nvme_dev_attrs,
3571 .is_visible = nvme_dev_attrs_are_visible,
3572};
3573
3574static const struct attribute_group *nvme_dev_attr_groups[] = {
3575 &nvme_dev_attrs_group,
3576 NULL,
3577};
3578
3579static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
3580 unsigned nsid)
3581{
3582 struct nvme_ns_head *h;
3583
3584 lockdep_assert_held(&subsys->lock);
3585
3586 list_for_each_entry(h, &subsys->nsheads, entry) {
3587 if (h->ns_id != nsid)
3588 continue;
3589 if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
3590 return h;
3591 }
3592
3593 return NULL;
3594}
3595
3596static int __nvme_check_ids(struct nvme_subsystem *subsys,
3597 struct nvme_ns_head *new)
3598{
3599 struct nvme_ns_head *h;
3600
3601 lockdep_assert_held(&subsys->lock);
3602
3603 list_for_each_entry(h, &subsys->nsheads, entry) {
3604 if (nvme_ns_ids_valid(&new->ids) &&
3605 nvme_ns_ids_equal(&new->ids, &h->ids))
3606 return -EINVAL;
3607 }
3608
3609 return 0;
3610}
3611
3612static void nvme_cdev_rel(struct device *dev)
3613{
3614 ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
3615}
3616
3617void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
3618{
3619 cdev_device_del(cdev, cdev_device);
3620 put_device(cdev_device);
3621}
3622
3623int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
3624 const struct file_operations *fops, struct module *owner)
3625{
3626 int minor, ret;
3627
3628 minor = ida_simple_get(&nvme_ns_chr_minor_ida, 0, 0, GFP_KERNEL);
3629 if (minor < 0)
3630 return minor;
3631 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
3632 cdev_device->class = nvme_ns_chr_class;
3633 cdev_device->release = nvme_cdev_rel;
3634 device_initialize(cdev_device);
3635 cdev_init(cdev, fops);
3636 cdev->owner = owner;
3637 ret = cdev_device_add(cdev, cdev_device);
3638 if (ret)
3639 put_device(cdev_device);
3640
3641 return ret;
3642}
3643
3644static int nvme_ns_chr_open(struct inode *inode, struct file *file)
3645{
3646 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
3647}
3648
3649static int nvme_ns_chr_release(struct inode *inode, struct file *file)
3650{
3651 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
3652 return 0;
3653}
3654
3655static const struct file_operations nvme_ns_chr_fops = {
3656 .owner = THIS_MODULE,
3657 .open = nvme_ns_chr_open,
3658 .release = nvme_ns_chr_release,
3659 .unlocked_ioctl = nvme_ns_chr_ioctl,
3660 .compat_ioctl = compat_ptr_ioctl,
3661};
3662
3663static int nvme_add_ns_cdev(struct nvme_ns *ns)
3664{
3665 int ret;
3666
3667 ns->cdev_device.parent = ns->ctrl->device;
3668 ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
3669 ns->ctrl->instance, ns->head->instance);
3670 if (ret)
3671 return ret;
3672
3673 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
3674 ns->ctrl->ops->module);
3675}
3676
3677static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3678 unsigned nsid, struct nvme_ns_ids *ids)
3679{
3680 struct nvme_ns_head *head;
3681 size_t size = sizeof(*head);
3682 int ret = -ENOMEM;
3683
3684#ifdef CONFIG_NVME_MULTIPATH
3685 size += num_possible_nodes() * sizeof(struct nvme_ns *);
3686#endif
3687
3688 head = kzalloc(size, GFP_KERNEL);
3689 if (!head)
3690 goto out;
3691 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3692 if (ret < 0)
3693 goto out_free_head;
3694 head->instance = ret;
3695 INIT_LIST_HEAD(&head->list);
3696 ret = init_srcu_struct(&head->srcu);
3697 if (ret)
3698 goto out_ida_remove;
3699 head->subsys = ctrl->subsys;
3700 head->ns_id = nsid;
3701 head->ids = *ids;
3702 kref_init(&head->ref);
3703
3704 ret = __nvme_check_ids(ctrl->subsys, head);
3705 if (ret) {
3706 dev_err(ctrl->device,
3707 "duplicate IDs for nsid %d\n", nsid);
3708 goto out_cleanup_srcu;
3709 }
3710
3711 if (head->ids.csi) {
3712 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
3713 if (ret)
3714 goto out_cleanup_srcu;
3715 } else
3716 head->effects = ctrl->effects;
3717
3718 ret = nvme_mpath_alloc_disk(ctrl, head);
3719 if (ret)
3720 goto out_cleanup_srcu;
3721
3722 list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3723
3724 kref_get(&ctrl->subsys->ref);
3725
3726 return head;
3727out_cleanup_srcu:
3728 cleanup_srcu_struct(&head->srcu);
3729out_ida_remove:
3730 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3731out_free_head:
3732 kfree(head);
3733out:
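 /* Positive values are NVMe status codes; map them to a negative errno. */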
3734 if (ret > 0)
3735 ret = blk_status_to_errno(nvme_error_status(ret));
3736 return ERR_PTR(ret);
3737}
3738
3739static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3740 struct nvme_ns_ids *ids, bool is_shared)
3741{
3742 struct nvme_ctrl *ctrl = ns->ctrl;
3743 struct nvme_ns_head *head = NULL;
3744 int ret = 0;
3745
3746 mutex_lock(&ctrl->subsys->lock);
3747 head = nvme_find_ns_head(ctrl->subsys, nsid);
3748 if (!head) {
3749 head = nvme_alloc_ns_head(ctrl, nsid, ids);
3750 if (IS_ERR(head)) {
3751 ret = PTR_ERR(head);
3752 goto out_unlock;
3753 }
3754 head->shared = is_shared;
3755 } else {
3756 ret = -EINVAL;
3757 if (!is_shared || !head->shared) {
3758 dev_err(ctrl->device,
3759 "Duplicate unshared namespace %d\n", nsid);
3760 goto out_put_ns_head;
3761 }
3762 if (!nvme_ns_ids_equal(&head->ids, ids)) {
3763 dev_err(ctrl->device,
3764 "IDs don't match for shared namespace %d\n",
3765 nsid);
3766 goto out_put_ns_head;
3767 }
3768 }
3769
3770 list_add_tail_rcu(&ns->siblings, &head->list);
3771 ns->head = head;
3772 mutex_unlock(&ctrl->subsys->lock);
3773 return 0;
3774
3775out_put_ns_head:
3776 nvme_put_ns_head(head);
3777out_unlock:
3778 mutex_unlock(&ctrl->subsys->lock);
3779 return ret;
3780}
3781
3782struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3783{
3784 struct nvme_ns *ns, *ret = NULL;
3785
3786 down_read(&ctrl->namespaces_rwsem);
3787 list_for_each_entry(ns, &ctrl->namespaces, list) {
3788 if (ns->head->ns_id == nsid) {
3789 if (!nvme_get_ns(ns))
3790 continue;
3791 ret = ns;
3792 break;
3793 }
3794 if (ns->head->ns_id > nsid)
3795 break;
3796 }
3797 up_read(&ctrl->namespaces_rwsem);
3798 return ret;
3799}
3800EXPORT_SYMBOL_GPL(nvme_find_get_ns);
3801
/*
 * Add the namespace to the controller list while keeping the list ordered.
 */
3805static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
3806{
3807 struct nvme_ns *tmp;
3808
3809 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
3810 if (tmp->head->ns_id < ns->head->ns_id) {
3811 list_add(&ns->list, &tmp->list);
3812 return;
3813 }
3814 }
3815 list_add(&ns->list, &ns->ctrl->namespaces);
3816}
3817
3818static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
3819 struct nvme_ns_ids *ids)
3820{
3821 struct nvme_ns *ns;
3822 struct gendisk *disk;
3823 struct nvme_id_ns *id;
3824 int node = ctrl->numa_node;
3825
3826 if (nvme_identify_ns(ctrl, nsid, ids, &id))
3827 return;
3828
3829 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3830 if (!ns)
3831 goto out_free_id;
3832
3833 ns->queue = blk_mq_init_queue(ctrl->tagset);
3834 if (IS_ERR(ns->queue))
3835 goto out_free_ns;
3836
3837 if (ctrl->opts && ctrl->opts->data_digest)
3838 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
3839
3840 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3841 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
3842 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3843
3844 ns->queue->queuedata = ns;
3845 ns->ctrl = ctrl;
3846 kref_init(&ns->kref);
3847
3848 if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
3849 goto out_free_queue;
3850
3851 disk = alloc_disk_node(0, node);
3852 if (!disk)
3853 goto out_unlink_ns;
3854
3855 disk->fops = &nvme_bdev_ops;
3856 disk->private_data = ns;
3857 disk->queue = ns->queue;
3858 disk->flags = GENHD_FL_EXT_DEVT;
3859
 /*
  * Without the multipath code enabled, multiple controllers per
  * subsystem are visible as devices and thus we cannot use the
  * subsystem instance.
  */
3864 if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
3865 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
3866 ns->head->instance);
3867 ns->disk = disk;
3868
3869 if (nvme_update_ns_info(ns, id))
3870 goto out_put_disk;
3871
3872 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3873 if (nvme_nvm_register(ns, disk->disk_name, node)) {
3874 dev_warn(ctrl->device, "LightNVM init failure\n");
3875 goto out_put_disk;
3876 }
3877 }
3878
3879 down_write(&ctrl->namespaces_rwsem);
3880 nvme_ns_add_to_ctrl_list(ns);
3881 up_write(&ctrl->namespaces_rwsem);
3882 nvme_get_ctrl(ctrl);
3883
3884 device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3885 if (!nvme_ns_head_multipath(ns->head))
3886 nvme_add_ns_cdev(ns);
3887
3888 nvme_mpath_add_disk(ns, id);
3889 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3890 kfree(id);
3891
3892 return;
3893 out_put_disk:
 /* prevent double queue cleanup */
3895 ns->disk->queue = NULL;
3896 put_disk(ns->disk);
3897 out_unlink_ns:
3898 mutex_lock(&ctrl->subsys->lock);
3899 list_del_rcu(&ns->siblings);
3900 if (list_empty(&ns->head->list))
3901 list_del_init(&ns->head->entry);
3902 mutex_unlock(&ctrl->subsys->lock);
3903 nvme_put_ns_head(ns->head);
3904 out_free_queue:
3905 blk_cleanup_queue(ns->queue);
3906 out_free_ns:
3907 kfree(ns);
3908 out_free_id:
3909 kfree(id);
3910}
3911
3912static void nvme_ns_remove(struct nvme_ns *ns)
3913{
3914 bool last_path = false;
3915
3916 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3917 return;
3918
3919 set_capacity(ns->disk, 0);
3920 nvme_fault_inject_fini(&ns->fault_inject);
3921
3922 mutex_lock(&ns->ctrl->subsys->lock);
3923 list_del_rcu(&ns->siblings);
3924 if (list_empty(&ns->head->list)) {
3925 list_del_init(&ns->head->entry);
3926 last_path = true;
3927 }
3928 mutex_unlock(&ns->ctrl->subsys->lock);
3929
3930 synchronize_rcu();
3931 nvme_mpath_clear_current_path(ns);
3932 synchronize_srcu(&ns->head->srcu);
3933
3934 if (ns->disk->flags & GENHD_FL_UP) {
3935 if (!nvme_ns_head_multipath(ns->head))
3936 nvme_cdev_del(&ns->cdev, &ns->cdev_device);
3937 del_gendisk(ns->disk);
3938 blk_cleanup_queue(ns->queue);
3939 if (blk_get_integrity(ns->disk))
3940 blk_integrity_unregister(ns->disk);
3941 }
3942
3943 down_write(&ns->ctrl->namespaces_rwsem);
3944 list_del_init(&ns->list);
3945 up_write(&ns->ctrl->namespaces_rwsem);
3946
3947 if (last_path)
3948 nvme_mpath_shutdown_disk(ns->head);
3949 nvme_put_ns(ns);
3950}
3951
3952static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
3953{
3954 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3955
3956 if (ns) {
3957 nvme_ns_remove(ns);
3958 nvme_put_ns(ns);
3959 }
3960}
3961
3962static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
3963{
3964 struct nvme_id_ns *id;
3965 int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
3966
3967 if (test_bit(NVME_NS_DEAD, &ns->flags))
3968 goto out;
3969
3970 ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
3971 if (ret)
3972 goto out;
3973
3974 ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
3975 if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
3976 dev_err(ns->ctrl->device,
3977 "identifiers changed for nsid %d\n", ns->head->ns_id);
3978 goto out_free_id;
3979 }
3980
3981 ret = nvme_update_ns_info(ns, id);
3982
3983out_free_id:
3984 kfree(id);
3985out:
 /*
  * Only remove the namespace if we got a fatal error back from the
  * device, otherwise ignore the error and just move on.
  *
  * TODO: we should probably schedule a delayed retry here.
  */
3992 if (ret > 0 && (ret & NVME_SC_DNR))
3993 nvme_ns_remove(ns);
3994}
3995
3996static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3997{
3998 struct nvme_ns_ids ids = { };
3999 struct nvme_ns *ns;
4000
4001 if (nvme_identify_ns_descs(ctrl, nsid, &ids))
4002 return;
4003
4004 ns = nvme_find_get_ns(ctrl, nsid);
4005 if (ns) {
4006 nvme_validate_ns(ns, &ids);
4007 nvme_put_ns(ns);
4008 return;
4009 }
4010
4011 switch (ids.csi) {
4012 case NVME_CSI_NVM:
4013 nvme_alloc_ns(ctrl, nsid, &ids);
4014 break;
4015 case NVME_CSI_ZNS:
4016 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
4017 dev_warn(ctrl->device,
4018 "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
4019 nsid);
4020 break;
4021 }
4022 if (!nvme_multi_css(ctrl)) {
4023 dev_warn(ctrl->device,
4024 "command set not reported for nsid: %d\n",
4025 nsid);
4026 break;
4027 }
4028 nvme_alloc_ns(ctrl, nsid, &ids);
4029 break;
4030 default:
4031 dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
4032 ids.csi, nsid);
4033 break;
4034 }
4035}
4036
4037static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
4038 unsigned nsid)
4039{
4040 struct nvme_ns *ns, *next;
4041 LIST_HEAD(rm_list);
4042
4043 down_write(&ctrl->namespaces_rwsem);
4044 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
4045 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
4046 list_move_tail(&ns->list, &rm_list);
4047 }
4048 up_write(&ctrl->namespaces_rwsem);
4049
4050 list_for_each_entry_safe(ns, next, &rm_list, list)
4051 nvme_ns_remove(ns);
4053}
4054
4055static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
4056{
4057 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
4058 __le32 *ns_list;
4059 u32 prev = 0;
4060 int ret = 0, i;
4061
4062 if (nvme_ctrl_limited_cns(ctrl))
4063 return -EOPNOTSUPP;
4064
4065 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
4066 if (!ns_list)
4067 return -ENOMEM;
4068
4069 for (;;) {
4070 struct nvme_command cmd = {
4071 .identify.opcode = nvme_admin_identify,
4072 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST,
4073 .identify.nsid = cpu_to_le32(prev),
4074 };
4075
4076 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
4077 NVME_IDENTIFY_DATA_SIZE);
4078 if (ret) {
4079 dev_warn(ctrl->device,
4080 "Identify NS List failed (status=0x%x)\n", ret);
4081 goto free;
4082 }
4083
4084 for (i = 0; i < nr_entries; i++) {
4085 u32 nsid = le32_to_cpu(ns_list[i]);
4086
4087 if (!nsid)
4088 goto out;
4089 nvme_validate_or_alloc_ns(ctrl, nsid);
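 /* Remove namespaces the controller no longer reports as active. */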
4090 while (++prev < nsid)
4091 nvme_ns_remove_by_nsid(ctrl, prev);
4092 }
4093 }
4094 out:
4095 nvme_remove_invalid_namespaces(ctrl, prev);
4096 free:
4097 kfree(ns_list);
4098 return ret;
4099}
4100
4101static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
4102{
4103 struct nvme_id_ctrl *id;
4104 u32 nn, i;
4105
4106 if (nvme_identify_ctrl(ctrl, &id))
4107 return;
4108 nn = le32_to_cpu(id->nn);
4109 kfree(id);
4110
4111 for (i = 1; i <= nn; i++)
4112 nvme_validate_or_alloc_ns(ctrl, i);
4113
4114 nvme_remove_invalid_namespaces(ctrl, nn);
4115}
4116
4117static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
4118{
4119 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
4120 __le32 *log;
4121 int error;
4122
4123 log = kzalloc(log_size, GFP_KERNEL);
4124 if (!log)
4125 return;
4126
 /*
  * We need to read the log to clear the AEN, but we don't want to rely
  * on it for the changed namespace information as userspace could have
  * raced with us in reading the log page, which could cause us to miss
  * updates.
  */
4133 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
4134 NVME_CSI_NVM, log, log_size, 0);
4135 if (error)
4136 dev_warn(ctrl->device,
4137 "reading changed ns log failed: %d\n", error);
4138
4139 kfree(log);
4140}
4141
4142static void nvme_scan_work(struct work_struct *work)
4143{
4144 struct nvme_ctrl *ctrl =
4145 container_of(work, struct nvme_ctrl, scan_work);
4146
 /* No tagset on a live ctrl means IO queues could not be created */
4148 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
4149 return;
4150
4151 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
4152 dev_info(ctrl->device, "rescanning namespaces.\n");
4153 nvme_clear_changed_ns_log(ctrl);
4154 }
4155
4156 mutex_lock(&ctrl->scan_lock);
4157 if (nvme_scan_ns_list(ctrl) != 0)
4158 nvme_scan_ns_sequential(ctrl);
4159 mutex_unlock(&ctrl->scan_lock);
4160}
4161
/*
 * Remove every namespace from the controller.  This runs on controller
 * deletion and on teardown of a dead controller, so it must cope with
 * both graceful and ungraceful exits.
 */
4167void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
4168{
4169 struct nvme_ns *ns, *next;
4170 LIST_HEAD(ns_list);
4171
 /*
  * Make sure to requeue I/O to all namespaces as these
  * might result from the scan itself and must complete
  * for the scan_work to make progress.
  */
4177 nvme_mpath_clear_ctrl_paths(ctrl);
4178
 /* prevent racing with ns scanning */
4180 flush_work(&ctrl->scan_work);
4181
 /*
  * A dead state indicates the controller was not gracefully
  * disconnected. In that case, we won't be able to flush any data while
  * removing the namespaces' disks; fail all the queues now to avoid
  * potentially having to clean up a failed sync later.
  */
4188 if (ctrl->state == NVME_CTRL_DEAD)
4189 nvme_kill_queues(ctrl);
4190
 /* this is a no-op when called from the controller reset handler */
4192 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
4193
4194 down_write(&ctrl->namespaces_rwsem);
4195 list_splice_init(&ctrl->namespaces, &ns_list);
4196 up_write(&ctrl->namespaces_rwsem);
4197
4198 list_for_each_entry_safe(ns, next, &ns_list, list)
4199 nvme_ns_remove(ns);
4200}
4201EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
4202
4203static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
4204{
4205 struct nvme_ctrl *ctrl =
4206 container_of(dev, struct nvme_ctrl, ctrl_device);
4207 struct nvmf_ctrl_options *opts = ctrl->opts;
4208 int ret;
4209
4210 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
4211 if (ret)
4212 return ret;
4213
4214 if (opts) {
4215 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
4216 if (ret)
4217 return ret;
4218
4219 ret = add_uevent_var(env, "NVME_TRSVCID=%s",
4220 opts->trsvcid ?: "none");
4221 if (ret)
4222 return ret;
4223
4224 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
4225 opts->host_traddr ?: "none");
4226 if (ret)
4227 return ret;
4228
4229 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
4230 opts->host_iface ?: "none");
4231 }
4232 return ret;
4233}
4234
4235static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
4236{
4237 char *envp[2] = { NULL, NULL };
4238 u32 aen_result = ctrl->aen_result;
4239
4240 ctrl->aen_result = 0;
4241 if (!aen_result)
4242 return;
4243
4244 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
4245 if (!envp[0])
4246 return;
4247 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4248 kfree(envp[0]);
4249}
4250
4251static void nvme_async_event_work(struct work_struct *work)
4252{
4253 struct nvme_ctrl *ctrl =
4254 container_of(work, struct nvme_ctrl, async_event_work);
4255
4256 nvme_aen_uevent(ctrl);
4257
 /*
  * The transport drivers must guarantee AER submission here is safe by
  * flushing ctrl async_event_work after changing the controller state
  * from LIVE and before freeing the admin queue.
  */
4263 if (ctrl->state == NVME_CTRL_LIVE)
4264 ctrl->ops->submit_async_event(ctrl);
4265}
4266
4267static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
4268{
4270 u32 csts;
4271
4272 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
4273 return false;
4274
4275 if (csts == ~0)
4276 return false;
4277
4278 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
4279}
4280
4281static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
4282{
4283 struct nvme_fw_slot_info_log *log;
4284
4285 log = kmalloc(sizeof(*log), GFP_KERNEL);
4286 if (!log)
4287 return;
4288
4289 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
4290 log, sizeof(*log), 0))
4291 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4292 kfree(log);
4293}
4294
4295static void nvme_fw_act_work(struct work_struct *work)
4296{
4297 struct nvme_ctrl *ctrl = container_of(work,
4298 struct nvme_ctrl, fw_act_work);
4299 unsigned long fw_act_timeout;
4300
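 /* MTFA (maximum time for firmware activation) is in 100ms units. */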
4301 if (ctrl->mtfa)
4302 fw_act_timeout = jiffies +
4303 msecs_to_jiffies(ctrl->mtfa * 100);
4304 else
4305 fw_act_timeout = jiffies +
4306 msecs_to_jiffies(admin_timeout * 1000);
4307
4308 nvme_stop_queues(ctrl);
4309 while (nvme_ctrl_pp_status(ctrl)) {
4310 if (time_after(jiffies, fw_act_timeout)) {
4311 dev_warn(ctrl->device,
4312 "Fw activation timeout, reset controller\n");
4313 nvme_try_sched_reset(ctrl);
4314 return;
4315 }
4316 msleep(100);
4317 }
4318
4319 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4320 return;
4321
4322 nvme_start_queues(ctrl);
4323
4324 nvme_get_fw_slot_info(ctrl);
4325}
4326
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = (result & 0xff00) >> 8;

	trace_nvme_async_event(ctrl, aer_notice_type);

	switch (aer_notice_type) {
	case NVME_AER_NOTICE_NS_CHANGED:
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}

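/*
 * Called by the transports on completion of an Asynchronous Event Request.
 * Handles the event and schedules async_event_work to notify userspace and
 * resubmit the AER.
 */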
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = result & 0x07;

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (aer_type) {
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		trace_nvme_async_event(ctrl, aer_type);
		ctrl->aen_result = result;
		break;
	default:
		break;
	}
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

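/*
 * Stop all background work on a controller before teardown or reset:
 * multipath handling, keep-alive, failfast, AEN and firmware activation
 * processing.
 */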
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	nvme_stop_failfast_work(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

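/*
 * Start the background machinery on a freshly (re)initialized controller:
 * keep-alive, AEN handling, and, for controllers with I/O queues, a
 * namespace scan and queue restart.
 */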
void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_start_keep_alive(ctrl);

	nvme_enable_aen(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_hwmon_exit(ctrl);
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
	nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_cels(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log *cel;
	unsigned long i;

	xa_for_each(&ctrl->cels, i, cel) {
		xa_erase(&ctrl->cels, i);
		kfree(cel);
	}

	xa_destroy(&ctrl->cels);
}

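/*
 * Release callback for the controller device; runs when the last reference
 * to the controller is dropped.
 */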
static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (!subsys || ctrl->instance != subsys->instance)
		ida_simple_remove(&nvme_instance_ida, ctrl->instance);

	nvme_free_cels(ctrl);
	nvme_mpath_uninit(ctrl);
	__free_page(ctrl->discard_page);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}

/*
 * Initialize a NVMe controller structure.  This needs to be called during
 * earliest initialization so that we have the initialized structure around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	ctrl->numa_node = NUMA_NO_NODE;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
			ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
	nvme_mpath_init_ctrl(ctrl);

	return 0;
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

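/*
 * Undo a previous nvme_start_freeze() on all namespace queues.
 */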
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

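/*
 * Wait for all namespace queues to be frozen, giving up after @timeout
 * jiffies.  Returns the time remaining; a value <= 0 means the freeze did
 * not complete in time.
 */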
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

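/*
 * Quiesce all namespace queues: block dispatch to the driver without
 * failing or draining already queued requests (unlike a queue freeze).
 */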
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

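/*
 * Wait for any outstanding timeout handling on all namespace queues to
 * finish.
 */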
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);

struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
}
EXPORT_SYMBOL_GPL(nvme_ctrl_from_file);

/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}

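/*
 * Module init: allocate the core, reset and delete workqueues, the
 * character device regions and the device classes, unwinding in reverse
 * order on failure.
 */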
static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}

	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
				     "nvme-generic");
	if (result < 0)
		goto destroy_subsys_class;

	nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic");
	if (IS_ERR(nvme_ns_chr_class)) {
		result = PTR_ERR(nvme_ns_chr_class);
		goto unregister_generic_ns;
	}

	return 0;

unregister_generic_ns:
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
	class_destroy(nvme_subsys_class);
destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}

static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_ns_chr_class);
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_ns_chr_minor_ida);
	ida_destroy(&nvme_instance_ida);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);