// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq host controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static struct class *nvme_ns_chr_class;

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only new queue scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}


int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() complete,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

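/*
 * Map the NVMe status code in the low 11 bits of the completion status
 * to the closest matching block layer status for upper layers.
 */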
static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

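/*
 * Requeue a failed command, honoring the Command Retry Delay (CRD) field
 * of the completion status: a non-zero CRD selects one of the three
 * controller-reported CRDT delay times (in units of 100 ms).
 */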
static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
};

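/*
 * Decide what to do with a completed request: complete it to the caller,
 * retry it on the same path, or fail it over to another path.  DNR status,
 * exhausted retries and noretry requests always complete; path errors on
 * multipath requests fail over.
 */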
static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;

	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;

	if (req->cmd_flags & REQ_NVME_MPATH) {
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
			return FAILOVER;
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
	}

	return RETRY;
}

static inline void nvme_end_req_zoned(struct request *req)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));
}

static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	nvme_end_req_zoned(req);
	nvme_trace_bio_complete(req);
	blk_mq_end_request(req, status);
}

void nvme_complete_rq(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_complete_batch_req(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);
	nvme_end_req_zoned(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);

/*
 * Called to unwind from ->queue_rq when a failed command could not be
 * delivered to the target.  Completing the request with a host path
 * error lets the multipath code retry it on another path instead of
 * returning the failure to the submitter.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

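/*
 * Controller state machine: each case below lists the old states from
 * which the transition to new_state is legal.  Returns true and wakes
 * any state waiters if the transition was performed, false otherwise.
 */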
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (!changed)
		return false;

	if (ctrl->state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
		nvme_kick_requeue_lists(ctrl);
	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);

static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

static inline void nvme_init_request(struct request *req,
		struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
		req->timeout = NVME_ADMIN_TIMEOUT;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_POLLED;
	nvme_clear_nvme_request(req);
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static struct request *nvme_alloc_request_qid(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	struct request *req;

	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
			qid ? qid - 1 : 0);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}

/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered.  However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
	    ctrl->state != NVME_CTRL_DELETING &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * Passthru commands on the admin queue cannot be ordered against
	 * the connect, enable and other commands in the initialization
	 * sequence while the controller is not LIVE, so reject user
	 * commands until initialization has completed.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
		 */
		switch (ctrl->state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
				return true;
			break;
		default:
			break;
		case NVME_CTRL_DEAD:
			return false;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c = { };

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c = { };

	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		goto out_disable_stream;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		goto out_disable_stream;
	}

	ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;

out_disable_stream:
	nvme_disable_streams(ctrl);
	return ret;
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

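/*
 * Build a DSM (Dataset Management) deallocate command: one range
 * descriptor is filled in for each discard bio merged into the request,
 * and the range buffer is attached to the request as a special payload.
 */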
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail allocation our range, fallback to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = alloc_size;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));

	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (nvme_ns_has_pi(ns)) {
		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			cmnd->write_zeroes.reftag =
				cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	return BLK_STS_OK;
}

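/*
 * Translate a block layer read/write request into an NVMe I/O command:
 * LBA, length and per-request flags (FUA, limited retry, prefetch hint)
 * plus write stream and protection information settings.
 */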
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = op;
	cmnd->rw.flags = 0;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.rsvd2 = 0;
	cmnd->rw.metadata = 0;
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	cmnd->rw.reftag = 0;
	cmnd->rw.apptag = 0;
	cmnd->rw.appmask = 0;

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

		if (req->special_vec.bv_page == ctrl->discard_page)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(bvec_virt(&req->special_vec));
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
	struct nvme_command *cmd = nvme_req(req)->cmd;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP))
		nvme_clear_nvme_request(req);

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/* these are setup prior to execution in nvme_init_request() */
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = nvme_cid(req);
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Return values:
 * 0:  success
 * > 0: nvme controller's cqe status response
 * < 0: kernel error in lieu of controller response
 */
static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
		bool at_head)
{
	blk_status_t status;

	status = blk_execute_rq(rq, at_head);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags)
{
	struct request *req;
	int ret;

	if (qid == NVME_QID_ANY)
		req = nvme_alloc_request(q, cmd, flags);
	else
		req = nvme_alloc_request_qid(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (timeout)
		req->timeout = timeout;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	ret = nvme_execute_rq(NULL, req, at_head);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unhandled effects:%08x\n",
				opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

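/*
 * Commands with CSE (Command Submission and Execution) effects must run
 * with no other outstanding I/O: freeze the controller and any multipath
 * queues before issuing them, and take the scan and subsystem locks so
 * concurrent rescans cannot observe intermediate state.
 */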
static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
			      struct nvme_command *cmd, int status)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_ctrl_finish(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * Keep alive commands interval on the host should be
			 * updated when KATO is modified by Set Features
			 * commands.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}

int nvme_execute_passthru_rq(struct request *rq)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	u32 effects;
	int ret;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(disk, rq, false);
	if (effects)
		nvme_passthru_end(ctrl, effects, cmd, ret);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);

/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 *   The host should send Keep Alive commands at half of the Keep Alive Timeout
 *   accounting for transport roundtrip times [..].
 */
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		nvme_queue_keep_alive_work(ctrl);
}

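/*
 * Send a Keep Alive command, or skip it when Traffic Based Keep Alive
 * (TBKAS) is enabled and regular command completions were seen during
 * the last interval.  An allocation failure here means the admin queue
 * is wedged, so reset the controller.
 */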
static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;
	struct request *rq;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		nvme_queue_keep_alive_work(ctrl);
		return;
	}

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
				BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
		nvme_reset_ctrl(ctrl);
		return;
	}

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;
	blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	nvme_queue_keep_alive_work(ctrl);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{
	unsigned int new_kato =
		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);

	dev_info(ctrl->device,
		 "keep alive interval updated from %u ms to %u ms\n",
		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);

	nvme_stop_keep_alive(ctrl);
	ctrl->kato = new_kato;
	nvme_start_keep_alive(ctrl);
}

/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur, bool *csi_seen)
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

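/*
 * Retrieve the Namespace Identification Descriptor list and walk the
 * returned descriptors to collect the namespace's EUI-64, NGUID, UUID
 * and Command Set Identifier.  Skipped entirely for pre-1.3 controllers
 * and controllers quirked as returning broken descriptor lists.
 */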
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	bool csi_seen = false;
	int status, pos, len;
	void *data;

	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			nsid, status);
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
		if (len < 0)
			break;

		len += sizeof(*cur);
	}

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 nsid);
		status = -EINVAL;
	}

free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_ns_ids *ids, struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		goto out_free_id;
	}

	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
	if ((*id)->ncap == 0) /* namespace not allocated or attached */
		goto out_free_id;

	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));

	return 0;

out_free_id:
	kfree(*id);
	return error;
}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	union nvme_result res = { 0 };
	struct nvme_command c = { };
	int ret;

	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}
static int nvme_ns_open(struct nvme_ns *ns)
{

	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
		goto fail;
	if (!nvme_get_ns(ns))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_ns_release(struct nvme_ns *ns)
{

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_ns_open(bdev->bd_disk->private_data);
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	nvme_ns_release(disk->private_data);
}

int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
	struct blk_integrity integrity = { };

	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
}
#endif

static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (ctrl->max_discard_sectors == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
}

static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
				 u32 *phys_bs, u32 *io_opt)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		*phys_bs = ns->sws * (1 << ns->lba_shift);
		if (ns->sgs)
			*io_opt = *phys_bs * ns->sgs;
	}

	return 0;
}

static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	/*
	 * The PI implementation requires the metadata size to be equal to the
	 * t10 pi tuple size.
	 */
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		return;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * The NVMe over Fabrics specification only supports metadata as
		 * part of the extended data LBA.  We rely on HCA/HBA support to
		 * remap the separate metadata buffer from the block layer.
		 */
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
			return;

		ns->features |= NVME_NS_EXT_LBAS;

		/*
		 * The current fabrics transport drivers support namespace
		 * metadata formats only if nvme_ns_has_pi() returns true.
		 * Suppress support for all other formats so the namespace will
		 * have a 0 capacity and not be usable through the block stack.
		 *
		 * Note, this check will need to be modified if any drivers
		 * gain the ability to use other metadata formats.
		 */
		if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	} else {
		/*
		 * For PCIe controllers the block layer maps the metadata
		 * itself: extended LBA formats interleave it with the data,
		 * while separate metadata formats use a dedicated integrity
		 * buffer.  Advertise whichever mode the current format uses.
		 */
		if (id->flbas & NVME_NS_FLBAS_META_EXT)
			ns->features |= NVME_NS_EXT_LBAS;
		else
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	}
}

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, 7);
	blk_queue_write_cache(q, vwc, vwc);
}

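/*
 * Propagate the namespace geometry reported by Identify Namespace to the
 * block device: capacity, logical/physical/atomic block sizes, optimal
 * I/O size, integrity profile, discard limits and read-only state.
 */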
static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
	unsigned short bs = 1 << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt = 0;

	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */
	if (ns->lba_shift > PAGE_SHIFT) {
		capacity = 0;
		bs = (1 << 9);
	}

	blk_integrity_unregister(disk);

	atomic_bs = phys_bs = bs;
	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}

	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
		/* NPWG = Namespace Preferred Write Granularity */
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
		/* NOWS = Namespace Optimal Write Size */
		io_opt = bs * (1 + le16_to_cpu(id->nows));
	}

	blk_queue_logical_block_size(disk->queue, bs);
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);

	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
			nvme_init_integrity(disk, ns->ms, ns->pi_type,
					    ns->ctrl->max_integrity_segments);
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

	set_capacity_and_notify(disk, capacity);

	nvme_config_discard(disk, ns);
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   ns->ctrl->max_zeroes_sectors);

	set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
		test_bit(NVME_NS_FORCE_RO, &ns->flags));
}

static inline bool nvme_first_scan(struct gendisk *disk)
{
	/* nvme_alloc_ns() scans the disk prior to adding it */
	return !disk_live(disk);
}

static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 iob;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		iob = ctrl->max_hw_sectors;
	else
		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

	if (!iob)
		return;

	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
				ns->disk->disk_name);
		return;
	}

	blk_queue_chunk_sectors(ns->queue, iob);
}

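/*
 * Revalidate a namespace after (re)scan: refresh the LBA format, queue
 * limits, metadata and zone information under a frozen queue, then update
 * the multipath head disk to match.
 */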
static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	int ret;

	blk_mq_freeze_queue(ns->disk->queue);
	ns->lba_shift = id->lbaf[lbaf].ds;
	nvme_set_queue_limits(ns->ctrl, ns->queue);

	nvme_configure_metadata(ns, id);
	nvme_set_chunk_sectors(ns, id);
	nvme_update_disk_info(ns->disk, ns, id);

	if (ns->head->ids.csi == NVME_CSI_ZNS) {
		ret = nvme_update_zone_info(ns, lbaf);
		if (ret)
			goto out_unfreeze;
	}

	set_bit(NVME_NS_READY, &ns->flags);
	blk_mq_unfreeze_queue(ns->disk->queue);

	if (blk_queue_is_zoned(ns->queue)) {
		ret = nvme_revalidate_zones(ns);
		if (ret && !nvme_first_scan(ns->disk))
			return ret;
	}

	if (nvme_ns_head_multipath(ns->head)) {
		blk_mq_freeze_queue(ns->head->disk->queue);
		nvme_update_disk_info(ns->head->disk, ns, id);
		nvme_mpath_revalidate_paths(ns);
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
		disk_update_readahead(ns->head->disk);
		blk_mq_unfreeze_queue(ns->head->disk->queue);
	}
	return 0;

out_unfreeze:
	/*
	 * If probing fails due an unsupported feature, hide the block device,
	 * but still allow other access.
	 */
	if (ret == -ENODEV) {
		ns->disk->flags |= GENHD_FL_HIDDEN;
		set_bit(NVME_NS_READY, &ns->flags);
		ret = 0;
	}
	blk_mq_unfreeze_queue(ns->disk->queue);
	return ret;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, u8 data[16])
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		u8 data[16])
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
}

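/*
 * Issue a persistent reservation command: the reservation key and
 * service action key are packed into a 16-byte payload, and multipath
 * devices route the command through the current path.
 */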
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_command c = { };
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);

	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
	    bdev->bd_disk->fops == &nvme_ns_head_ops)
		return nvme_send_ns_head_pr_command(bdev, &c, data);
	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd = { };

	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
			NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif

#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
			data);
}
#else
#define nvme_report_zones	NULL
#endif

static const struct block_device_operations nvme_bdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_report_zones,
	.pr_ops		= &nvme_pr_ops,
};

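/*
 * Poll CSTS.RDY until it matches the expected value after enabling or
 * disabling the controller, bounding the wait by the CAP.TO timeout the
 * controller advertises.  All-ones CSTS means the device is gone.
 */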
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		usleep_range(1000, 2000);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s, CSTS=0x%x\n",
				enabled ? "initialisation" : "reset", csts);
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, ctrl->cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2172
2173int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2174{
2175 unsigned dev_page_min;
2176 int ret;
2177
2178 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2179 if (ret) {
2180 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2181 return ret;
2182 }
2183 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2184
2185 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
2186 dev_err(ctrl->device,
2187 "Minimum device page size %u too large for host (%u)\n",
2188 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
2189 return -ENODEV;
2190 }
2191
2192 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2193 ctrl->ctrl_config = NVME_CC_CSS_CSI;
2194 else
2195 ctrl->ctrl_config = NVME_CC_CSS_NVM;
2196 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2197 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2198 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2199 ctrl->ctrl_config |= NVME_CC_ENABLE;
2200
2201 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2202 if (ret)
2203 return ret;
2204 return nvme_wait_ready(ctrl, ctrl->cap, true);
2205}
2206EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2207
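/*
 * Request a normal shutdown via CC.SHN and poll CSTS.SHST until the
 * controller reports that shutdown processing is complete.
 */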
2208int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
2209{
2210 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2211 u32 csts;
2212 int ret;
2213
2214 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2215 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2216
2217 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2218 if (ret)
2219 return ret;
2220
2221 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2222 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
2223 break;
2224
2225 msleep(100);
2226 if (fatal_signal_pending(current))
2227 return -EINTR;
2228 if (time_after(jiffies, timeout)) {
2229 dev_err(ctrl->device,
2230 "Device shutdown incomplete; abort shutdown\n");
2231 return -ENODEV;
2232 }
2233 }
2234
2235 return ret;
2236}
2237EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2238
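/* Set the Timestamp feature if the controller advertises it in ONCS. */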
2239static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2240{
2241 __le64 ts;
2242 int ret;
2243
2244 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2245 return 0;
2246
2247 ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2248 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2249 NULL);
2250 if (ret)
2251 dev_warn_once(ctrl->device,
2252 "could not set timestamp (%d)\n", ret);
2253 return ret;
2254}
2255
2256static int nvme_configure_acre(struct nvme_ctrl *ctrl)
2257{
2258 struct nvme_feat_host_behavior *host;
2259 int ret;

 /* Don't bother enabling the feature if retry delays aren't reported. */
2262 if (!ctrl->crdt[0])
2263 return 0;
2264
2265 host = kzalloc(sizeof(*host), GFP_KERNEL);
2266 if (!host)
2267 return 0;
2268
2269 host->acre = NVME_ENABLE_ACRE;
2270 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2271 host, sizeof(*host), NULL);
2272 kfree(host);
2273 return ret;
2274}
2275
/*
 * Check whether the given total (entry + exit) latency of a power state
 * allows it to be used as an APST transition target.  The latency is
 * compared against the primary and secondary latency tolerance module
 * parameters; on a match, the corresponding timeout is returned and the
 * matching tolerance index (1 or 2) is recorded in *last_index.
 */
2284static bool nvme_apst_get_transition_time(u64 total_latency,
2285 u64 *transition_time, unsigned *last_index)
2286{
2287 if (total_latency <= apst_primary_latency_tol_us) {
2288 if (*last_index == 1)
2289 return false;
2290 *last_index = 1;
2291 *transition_time = apst_primary_timeout_ms;
2292 return true;
2293 }
2294 if (apst_secondary_timeout_ms &&
2295 total_latency <= apst_secondary_latency_tol_us) {
2296 if (*last_index <= 2)
2297 return false;
2298 *last_index = 2;
2299 *transition_time = apst_secondary_timeout_ms;
2300 return true;
2301 }
2302 return false;
2303}
2304
/*
 * APST (Autonomous Power State Transition) lets us program a table of power
 * state transitions that the controller will perform automatically.
 *
 * Depending on module params, one of two techniques is used to build it:
 *
 * - If the module parameters provide explicit timeouts and latency
 *   tolerances, they are used to build a table with up to two
 *   non-operational states to transition to.
 *
 * - Otherwise we use a simple heuristic: we are willing to spend at most
 *   2% of the time transitioning between power states, so when running in
 *   any given state we enter the next lower-power non-operational state
 *   after waiting 50 * (enlat + exlat) microseconds, as long as that
 *   state's exit latency is under the requested maximum latency.
 *
 * We will not autonomously enter any non-operational power state for which
 * the total latency exceeds ps_max_latency_us.
 *
 * Users can set ps_max_latency_us to zero to turn off APST.
 */
2330static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2331{
2332 struct nvme_feat_auto_pst *table;
2333 unsigned apste = 0;
2334 u64 max_lat_us = 0;
2335 __le64 target = 0;
2336 int max_ps = -1;
2337 int state;
2338 int ret;
2339 unsigned last_lt_index = UINT_MAX;
2340
 /*
  * If APST isn't supported or if we haven't been initialized yet,
  * then don't do anything.
  */
2345 if (!ctrl->apsta)
2346 return 0;
2347
2348 if (ctrl->npss > 31) {
2349 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2350 return 0;
2351 }
2352
2353 table = kzalloc(sizeof(*table), GFP_KERNEL);
2354 if (!table)
2355 return 0;
2356
2357 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
 /* Turn off APST. */
2359 dev_dbg(ctrl->device, "APST disabled\n");
2360 goto done;
2361 }
2362
 /*
  * Walk through all states from lowest- to highest-power.  According
  * to the spec, lower-numbered states use more power.  NPSS, despite
  * the name, is the index of the lowest-power state, not the number
  * of states.
  */
2369 for (state = (int)ctrl->npss; state >= 0; state--) {
2370 u64 total_latency_us, exit_latency_us, transition_ms;
2371
2372 if (target)
2373 table->entries[state] = target;
2374
 /*
  * Don't allow transitions to the deepest state if it's quirked
  * off.
  */
2379 if (state == ctrl->npss &&
2380 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2381 continue;
2382
 /*
  * Is this state a useful non-operational state for higher-power
  * states to autonomously transition to?
  */
2387 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2388 continue;
2389
2390 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2391 if (exit_latency_us > ctrl->ps_max_latency_us)
2392 continue;
2393
2394 total_latency_us = exit_latency_us +
2395 le32_to_cpu(ctrl->psd[state].entry_lat);
2396
 /*
  * This state is good.  It can be used as the APST idle target
  * for higher-power states.
  */
2401 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
2402 if (!nvme_apst_get_transition_time(total_latency_us,
2403 &transition_ms, &last_lt_index))
2404 continue;
2405 } else {
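 /*
  * The 2% heuristic: 50 * (enlat + exlat) in microseconds equals
  * total_latency_us / 20 in milliseconds, rounded up and clamped
  * to the 24-bit Idle Time Prior to Transition field.
  */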
2406 transition_ms = total_latency_us + 19;
2407 do_div(transition_ms, 20);
2408 if (transition_ms > (1 << 24) - 1)
2409 transition_ms = (1 << 24) - 1;
2410 }
2411
2412 target = cpu_to_le64((state << 3) | (transition_ms << 8));
2413 if (max_ps == -1)
2414 max_ps = state;
2415 if (total_latency_us > max_lat_us)
2416 max_lat_us = total_latency_us;
2417 }
2418
2419 if (max_ps == -1)
2420 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2421 else
2422 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2423 max_ps, max_lat_us, (int)sizeof(*table), table);
2424 apste = 1;
2425
2426done:
2427 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2428 table, sizeof(*table), NULL);
2429 if (ret)
2430 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2431 kfree(table);
2432 return ret;
2433}
2434
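/*
 * PM QoS latency tolerance callback: cache the new maximum allowed
 * power-state latency and reprogram APST on a live controller.
 */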
2435static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2436{
2437 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2438 u64 latency;
2439
2440 switch (val) {
2441 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2442 case PM_QOS_LATENCY_ANY:
2443 latency = U64_MAX;
2444 break;
2445
2446 default:
2447 latency = val;
2448 }
2449
2450 if (ctrl->ps_max_latency_us != latency) {
2451 ctrl->ps_max_latency_us = latency;
2452 if (ctrl->state == NVME_CTRL_LIVE)
2453 nvme_configure_apst(ctrl);
2454 }
2455}
2456
2457struct nvme_core_quirk_entry {
 /*
  * NVMe model and firmware strings are padded with spaces.  For
  * simplicity, strings in the quirk table are padded with NULLs
  * instead.
  */
2463 u16 vid;
2464 const char *mn;
2465 const char *fr;
2466 unsigned long quirks;
2467};
2468
2469static const struct nvme_core_quirk_entry core_quirks[] = {
2470 {
 /*
  * This Toshiba device seems to die when using any APST state,
  * so keep APST off for it.
  */
2475 .vid = 0x1179,
2476 .mn = "THNSF5256GPUK TOSHIBA",
2477 .quirks = NVME_QUIRK_NO_APST,
2478 },
2479 {
 /*
  * This LiteON firmware revision has a race condition when
  * suspending to idle, so force the simple suspend path
  * instead.
  */
2485 .vid = 0x14a4,
2486 .fr = "22301111",
2487 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2488 },
2489 {
 /*
  * This Kioxia device times out and aborts I/O under load, most
  * easily reproducible with discards (fstrim).  Keep APST off;
  * booting with nvme_core.default_ps_max_latency_us=0 also works
  * as an alternative.
  */
2499 .vid = 0x1e0f,
2500 .mn = "KCD6XVUL6T40",
2501 .quirks = NVME_QUIRK_NO_APST,
2502 }
2503};
2504
2505
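/*
 * Compare a space-padded Identify string against a NUL-padded quirk-table
 * string, treating trailing spaces as "don't care".
 */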
2506static bool string_matches(const char *idstr, const char *match, size_t len)
2507{
2508 size_t matchlen;
2509
2510 if (!match)
2511 return true;
2512
2513 matchlen = strlen(match);
2514 WARN_ON_ONCE(matchlen > len);
2515
2516 if (memcmp(idstr, match, matchlen))
2517 return false;
2518
2519 for (; matchlen < len; matchlen++)
2520 if (idstr[matchlen] != ' ')
2521 return false;
2522
2523 return true;
2524}
2525
2526static bool quirk_matches(const struct nvme_id_ctrl *id,
2527 const struct nvme_core_quirk_entry *q)
2528{
2529 return q->vid == le16_to_cpu(id->vid) &&
2530 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2531 string_matches(id->fr, q->fr, sizeof(id->fr));
2532}
2533
2534static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2535 struct nvme_id_ctrl *id)
2536{
2537 size_t nqnlen;
2538 int off;
2539
 if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2541 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2542 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2543 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2544 return;
2545 }
2546
2547 if (ctrl->vs >= NVME_VS(1, 2, 1))
2548 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2549 }
2550
 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001. */
2552 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2553 "nqn.2014.08.org.nvmexpress:%04x%04x",
2554 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2555 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2556 off += sizeof(id->sn);
2557 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2558 off += sizeof(id->mn);
2559 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2560}
2561
2562static void nvme_release_subsystem(struct device *dev)
2563{
2564 struct nvme_subsystem *subsys =
2565 container_of(dev, struct nvme_subsystem, dev);
2566
2567 if (subsys->instance >= 0)
2568 ida_simple_remove(&nvme_instance_ida, subsys->instance);
2569 kfree(subsys);
2570}
2571
2572static void nvme_destroy_subsystem(struct kref *ref)
2573{
2574 struct nvme_subsystem *subsys =
2575 container_of(ref, struct nvme_subsystem, ref);
2576
2577 mutex_lock(&nvme_subsystems_lock);
2578 list_del(&subsys->entry);
2579 mutex_unlock(&nvme_subsystems_lock);
2580
2581 ida_destroy(&subsys->ns_ida);
2582 device_del(&subsys->dev);
2583 put_device(&subsys->dev);
2584}
2585
2586static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2587{
2588 kref_put(&subsys->ref, nvme_destroy_subsystem);
2589}
2590
2591static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2592{
2593 struct nvme_subsystem *subsys;
2594
2595 lockdep_assert_held(&nvme_subsystems_lock);
2596
 /*
  * Fail matches for discovery subsystems.  This results in each
  * discovery controller being bound to a unique subsystem, which
  * avoids issues validating controller values that can only be
  * valid for one discovery subsystem.  Further checks can be done
  * with the discovery controller's matching criteria.
  */
2605 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2606 return NULL;
2607
2608 list_for_each_entry(subsys, &nvme_subsystems, entry) {
2609 if (strcmp(subsys->subnqn, subsysnqn))
2610 continue;
2611 if (!kref_get_unless_zero(&subsys->ref))
2612 continue;
2613 return subsys;
2614 }
2615
2616 return NULL;
2617}
2618
2619#define SUBSYS_ATTR_RO(_name, _mode, _show) \
2620 struct device_attribute subsys_attr_##_name = \
2621 __ATTR(_name, _mode, _show, NULL)
2622
2623static ssize_t nvme_subsys_show_nqn(struct device *dev,
2624 struct device_attribute *attr,
2625 char *buf)
2626{
2627 struct nvme_subsystem *subsys =
2628 container_of(dev, struct nvme_subsystem, dev);
2629
2630 return sysfs_emit(buf, "%s\n", subsys->subnqn);
2631}
2632static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2633
2634static ssize_t nvme_subsys_show_type(struct device *dev,
2635 struct device_attribute *attr,
2636 char *buf)
2637{
2638 struct nvme_subsystem *subsys =
2639 container_of(dev, struct nvme_subsystem, dev);
2640
2641 switch (subsys->subtype) {
2642 case NVME_NQN_DISC:
2643 return sysfs_emit(buf, "discovery\n");
2644 case NVME_NQN_NVME:
2645 return sysfs_emit(buf, "nvm\n");
2646 default:
2647 return sysfs_emit(buf, "reserved\n");
2648 }
2649}
2650static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
2651
2652#define nvme_subsys_show_str_function(field) \
2653static ssize_t subsys_##field##_show(struct device *dev, \
2654 struct device_attribute *attr, char *buf) \
2655{ \
2656 struct nvme_subsystem *subsys = \
2657 container_of(dev, struct nvme_subsystem, dev); \
2658 return sysfs_emit(buf, "%.*s\n", \
2659 (int)sizeof(subsys->field), subsys->field); \
2660} \
2661static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2662
2663nvme_subsys_show_str_function(model);
2664nvme_subsys_show_str_function(serial);
2665nvme_subsys_show_str_function(firmware_rev);
2666
2667static struct attribute *nvme_subsys_attrs[] = {
2668 &subsys_attr_model.attr,
2669 &subsys_attr_serial.attr,
2670 &subsys_attr_firmware_rev.attr,
2671 &subsys_attr_subsysnqn.attr,
2672 &subsys_attr_subsystype.attr,
2673#ifdef CONFIG_NVME_MULTIPATH
2674 &subsys_attr_iopolicy.attr,
2675#endif
2676 NULL,
2677};
2678
2679static const struct attribute_group nvme_subsys_attrs_group = {
2680 .attrs = nvme_subsys_attrs,
2681};
2682
2683static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2684 &nvme_subsys_attrs_group,
2685 NULL,
2686};
2687
2688static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
2689{
2690 return ctrl->opts && ctrl->opts->discovery_nqn;
2691}
2692
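/*
 * Reject a controller whose cntlid duplicates that of a live controller in
 * the same subsystem, and reject additional controllers for subsystems
 * that don't advertise multi-controller support in CMIC.
 */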
2693static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2694 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2695{
2696 struct nvme_ctrl *tmp;
2697
2698 lockdep_assert_held(&nvme_subsystems_lock);
2699
2700 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2701 if (nvme_state_terminal(tmp))
2702 continue;
2703
2704 if (tmp->cntlid == ctrl->cntlid) {
2705 dev_err(ctrl->device,
2706 "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
2707 ctrl->cntlid, dev_name(tmp->device),
2708 subsys->subnqn);
2709 return false;
2710 }
2711
2712 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2713 nvme_discovery_ctrl(ctrl))
2714 continue;
2715
2716 dev_err(ctrl->device,
2717 "Subsystem does not support multiple controllers\n");
2718 return false;
2719 }
2720
2721 return true;
2722}
2723
2724static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2725{
2726 struct nvme_subsystem *subsys, *found;
2727 int ret;
2728
2729 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2730 if (!subsys)
2731 return -ENOMEM;
2732
2733 subsys->instance = -1;
2734 mutex_init(&subsys->lock);
2735 kref_init(&subsys->ref);
2736 INIT_LIST_HEAD(&subsys->ctrls);
2737 INIT_LIST_HEAD(&subsys->nsheads);
2738 nvme_init_subnqn(subsys, ctrl, id);
2739 memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2740 memcpy(subsys->model, id->mn, sizeof(subsys->model));
2741 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2742 subsys->vendor_id = le16_to_cpu(id->vid);
2743 subsys->cmic = id->cmic;
2744
 /* Versions prior to NVMe 1.4 don't necessarily report a valid type. */
2746 if (id->cntrltype == NVME_CTRL_DISC ||
2747 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
2748 subsys->subtype = NVME_NQN_DISC;
2749 else
2750 subsys->subtype = NVME_NQN_NVME;
2751
2752 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
2753 dev_err(ctrl->device,
 "Subsystem %s is not a discovery controller\n",
2755 subsys->subnqn);
2756 kfree(subsys);
2757 return -EINVAL;
2758 }
2759 subsys->awupf = le16_to_cpu(id->awupf);
2760 nvme_mpath_default_iopolicy(subsys);
2761
2762 subsys->dev.class = nvme_subsys_class;
2763 subsys->dev.release = nvme_release_subsystem;
2764 subsys->dev.groups = nvme_subsys_attrs_groups;
2765 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2766 device_initialize(&subsys->dev);
2767
2768 mutex_lock(&nvme_subsystems_lock);
2769 found = __nvme_find_get_subsystem(subsys->subnqn);
2770 if (found) {
2771 put_device(&subsys->dev);
2772 subsys = found;
2773
2774 if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2775 ret = -EINVAL;
2776 goto out_put_subsystem;
2777 }
2778 } else {
2779 ret = device_add(&subsys->dev);
2780 if (ret) {
2781 dev_err(ctrl->device,
2782 "failed to register subsystem device.\n");
2783 put_device(&subsys->dev);
2784 goto out_unlock;
2785 }
2786 ida_init(&subsys->ns_ida);
2787 list_add_tail(&subsys->entry, &nvme_subsystems);
2788 }
2789
2790 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2791 dev_name(ctrl->device));
2792 if (ret) {
2793 dev_err(ctrl->device,
2794 "failed to create sysfs link from subsystem.\n");
2795 goto out_put_subsystem;
2796 }
2797
2798 if (!found)
2799 subsys->instance = ctrl->instance;
2800 ctrl->subsys = subsys;
2801 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2802 mutex_unlock(&nvme_subsystems_lock);
2803 return 0;
2804
2805out_put_subsystem:
2806 nvme_put_subsystem(subsys);
2807out_unlock:
2808 mutex_unlock(&nvme_subsystems_lock);
2809 return ret;
2810}
2811
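/*
 * Issue a Get Log Page command.  The dword count is split across the
 * NUMDL/NUMDU fields and the byte offset across LPOL/LPOU.
 */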
2812int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
2813 void *log, size_t size, u64 offset)
2814{
2815 struct nvme_command c = { };
2816 u32 dwlen = nvme_bytes_to_numd(size);
2817
2818 c.get_log_page.opcode = nvme_admin_get_log_page;
2819 c.get_log_page.nsid = cpu_to_le32(nsid);
2820 c.get_log_page.lid = log_page;
2821 c.get_log_page.lsp = lsp;
2822 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2823 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2824 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2825 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2826 c.get_log_page.csi = csi;
2827
2828 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2829}
2830
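/*
 * Return the Commands Supported and Effects log for the given command set
 * identifier, reading it from the controller and caching it on first use.
 */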
2831static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
2832 struct nvme_effects_log **log)
2833{
2834 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
2835 int ret;
2836
2837 if (cel)
2838 goto out;
2839
2840 cel = kzalloc(sizeof(*cel), GFP_KERNEL);
2841 if (!cel)
2842 return -ENOMEM;
2843
2844 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
2845 cel, sizeof(*cel), 0);
2846 if (ret) {
2847 kfree(cel);
2848 return ret;
2849 }
2850
2851 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
2852out:
2853 *log = cel;
2854 return 0;
2855}
2856
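/*
 * Convert a size given in units of the controller's minimum memory page
 * size (CAP.MPSMIN, at least 4 KiB) into 512-byte sectors.
 */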
2857static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
2858{
2859 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
2860
2861 if (check_shl_overflow(1U, units + page_shift - 9, &val))
2862 return UINT_MAX;
2863 return val;
2864}
2865
2866static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
2867{
2868 struct nvme_command c = { };
2869 struct nvme_id_ctrl_nvm *id;
2870 int ret;
2871
2872 if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
2873 ctrl->max_discard_sectors = UINT_MAX;
2874 ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
2875 } else {
2876 ctrl->max_discard_sectors = 0;
2877 ctrl->max_discard_segments = 0;
2878 }
2879
 /*
  * Even though the NVMe spec explicitly states that MDTS is not
  * applicable to Write Zeroes, we are cautious and limit the size
  * to the controller's max_hw_sectors value, which is based on the
  * MDTS field and possibly other limiting factors.
  */
2886 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
2887 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
2888 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
2889 else
2890 ctrl->max_zeroes_sectors = 0;
2891
2892 if (nvme_ctrl_limited_cns(ctrl))
2893 return 0;
2894
2895 id = kzalloc(sizeof(*id), GFP_KERNEL);
2896 if (!id)
2897 return 0;
2898
2899 c.identify.opcode = nvme_admin_identify;
2900 c.identify.cns = NVME_ID_CNS_CS_CTRL;
2901 c.identify.csi = NVME_CSI_NVM;
2902
2903 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
2904 if (ret)
2905 goto free_data;
2906
2907 if (id->dmrl)
2908 ctrl->max_discard_segments = id->dmrl;
2909 if (id->dmrsl)
2910 ctrl->max_discard_sectors = le32_to_cpu(id->dmrsl);
2911 if (id->wzsl)
2912 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
2913
2914free_data:
2915 kfree(id);
2916 return ret;
2917}
2918
2919static int nvme_init_identify(struct nvme_ctrl *ctrl)
2920{
2921 struct nvme_id_ctrl *id;
2922 u32 max_hw_sectors;
2923 bool prev_apst_enabled;
2924 int ret;
2925
2926 ret = nvme_identify_ctrl(ctrl, &id);
2927 if (ret) {
2928 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2929 return -EIO;
2930 }
2931
2932 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2933 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
2934 if (ret < 0)
2935 goto out_free;
2936 }
2937
2938 if (!(ctrl->ops->flags & NVME_F_FABRICS))
2939 ctrl->cntlid = le16_to_cpu(id->cntlid);
2940
2941 if (!ctrl->identified) {
2942 unsigned int i;
2943
2944 ret = nvme_init_subsystem(ctrl, id);
2945 if (ret)
2946 goto out_free;
2947
 /*
  * Check for quirks.  A quirk can depend on the firmware version,
  * so, in principle, the set of quirks present can change across
  * a reset.  As a possible future enhancement, we could re-scan
  * for quirks every time we reinitialize the device, but we'd
  * have to make sure that the driver behaves consistently in
  * response to changes in the quirk set.
  */
2956 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2957 if (quirk_matches(id, &core_quirks[i]))
2958 ctrl->quirks |= core_quirks[i].quirks;
2959 }
2960 }
2961
2962 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2963 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2964 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2965 }
2966
2967 ctrl->crdt[0] = le16_to_cpu(id->crdt1);
2968 ctrl->crdt[1] = le16_to_cpu(id->crdt2);
2969 ctrl->crdt[2] = le16_to_cpu(id->crdt3);
2970
2971 ctrl->oacs = le16_to_cpu(id->oacs);
2972 ctrl->oncs = le16_to_cpu(id->oncs);
2973 ctrl->mtfa = le16_to_cpu(id->mtfa);
2974 ctrl->oaes = le32_to_cpu(id->oaes);
2975 ctrl->wctemp = le16_to_cpu(id->wctemp);
2976 ctrl->cctemp = le16_to_cpu(id->cctemp);
2977
2978 atomic_set(&ctrl->abort_limit, id->acl + 1);
2979 ctrl->vwc = id->vwc;
2980 if (id->mdts)
2981 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
2982 else
2983 max_hw_sectors = UINT_MAX;
2984 ctrl->max_hw_sectors =
2985 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2986
2987 nvme_set_queue_limits(ctrl, ctrl->admin_q);
2988 ctrl->sgls = le32_to_cpu(id->sgls);
2989 ctrl->kas = le16_to_cpu(id->kas);
2990 ctrl->max_namespaces = le32_to_cpu(id->mnan);
2991 ctrl->ctratt = le32_to_cpu(id->ctratt);
2992
2993 if (id->rtd3e) {
 /* us -> s */
2995 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
2996
2997 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
2998 shutdown_timeout, 60);
2999
3000 if (ctrl->shutdown_timeout != shutdown_timeout)
3001 dev_info(ctrl->device,
3002 "Shutdown timeout set to %u seconds\n",
3003 ctrl->shutdown_timeout);
3004 } else
3005 ctrl->shutdown_timeout = shutdown_timeout;
3006
3007 ctrl->npss = id->npss;
3008 ctrl->apsta = id->apsta;
3009 prev_apst_enabled = ctrl->apst_enabled;
3010 if (ctrl->quirks & NVME_QUIRK_NO_APST) {
3011 if (force_apst && id->apsta) {
3012 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3013 ctrl->apst_enabled = true;
3014 } else {
3015 ctrl->apst_enabled = false;
3016 }
3017 } else {
3018 ctrl->apst_enabled = id->apsta;
3019 }
3020 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
3021
3022 if (ctrl->ops->flags & NVME_F_FABRICS) {
3023 ctrl->icdoff = le16_to_cpu(id->icdoff);
3024 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
3025 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
3026 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
3027
 /*
  * In fabrics we need to verify that the cntlid matches the one
  * reported during the admin connect.
  */
3032 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3033 dev_err(ctrl->device,
3034 "Mismatching cntlid: Connect %u vs Identify "
3035 "%u, rejecting\n",
3036 ctrl->cntlid, le16_to_cpu(id->cntlid));
3037 ret = -EINVAL;
3038 goto out_free;
3039 }
3040
3041 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
3042 dev_err(ctrl->device,
3043 "keep-alive support is mandatory for fabrics\n");
3044 ret = -EINVAL;
3045 goto out_free;
3046 }
3047 } else {
3048 ctrl->hmpre = le32_to_cpu(id->hmpre);
3049 ctrl->hmmin = le32_to_cpu(id->hmmin);
3050 ctrl->hmminds = le32_to_cpu(id->hmminds);
3051 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3052 }
3053
3054 ret = nvme_mpath_init_identify(ctrl, id);
3055 if (ret < 0)
3056 goto out_free;
3057
3058 if (ctrl->apst_enabled && !prev_apst_enabled)
3059 dev_pm_qos_expose_latency_tolerance(ctrl->device);
3060 else if (!ctrl->apst_enabled && prev_apst_enabled)
3061 dev_pm_qos_hide_latency_tolerance(ctrl->device);
3062
3063out_free:
3064 kfree(id);
3065 return ret;
3066}
3067
/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
3073int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
3074{
3075 int ret;
3076
3077 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
3078 if (ret) {
3079 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3080 return ret;
3081 }
3082
3083 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3084
3085 if (ctrl->vs >= NVME_VS(1, 1, 0))
3086 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3087
3088 ret = nvme_init_identify(ctrl);
3089 if (ret)
3090 return ret;
3091
3092 ret = nvme_init_non_mdts_limits(ctrl);
3093 if (ret < 0)
3094 return ret;
3095
3096 ret = nvme_configure_apst(ctrl);
3097 if (ret < 0)
3098 return ret;
3099
3100 ret = nvme_configure_timestamp(ctrl);
3101 if (ret < 0)
3102 return ret;
3103
3104 ret = nvme_configure_directives(ctrl);
3105 if (ret < 0)
3106 return ret;
3107
3108 ret = nvme_configure_acre(ctrl);
3109 if (ret < 0)
3110 return ret;
3111
3112 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
3113 ret = nvme_hwmon_init(ctrl);
3114 if (ret < 0)
3115 return ret;
3116 }
3117
3118 ctrl->identified = true;
3119
3120 return 0;
3121}
3122EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
3123
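/* Only allow opening the per-controller character device while live. */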
3124static int nvme_dev_open(struct inode *inode, struct file *file)
3125{
3126 struct nvme_ctrl *ctrl =
3127 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3128
3129 switch (ctrl->state) {
3130 case NVME_CTRL_LIVE:
3131 break;
3132 default:
3133 return -EWOULDBLOCK;
3134 }
3135
3136 nvme_get_ctrl(ctrl);
3137 if (!try_module_get(ctrl->ops->module)) {
3138 nvme_put_ctrl(ctrl);
3139 return -EINVAL;
3140 }
3141
3142 file->private_data = ctrl;
3143 return 0;
3144}
3145
3146static int nvme_dev_release(struct inode *inode, struct file *file)
3147{
3148 struct nvme_ctrl *ctrl =
3149 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3150
3151 module_put(ctrl->ops->module);
3152 nvme_put_ctrl(ctrl);
3153 return 0;
3154}
3155
3156static const struct file_operations nvme_dev_fops = {
3157 .owner = THIS_MODULE,
3158 .open = nvme_dev_open,
3159 .release = nvme_dev_release,
3160 .unlocked_ioctl = nvme_dev_ioctl,
3161 .compat_ioctl = compat_ptr_ioctl,
3162};
3163
3164static ssize_t nvme_sysfs_reset(struct device *dev,
3165 struct device_attribute *attr, const char *buf,
3166 size_t count)
3167{
3168 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3169 int ret;
3170
3171 ret = nvme_reset_ctrl_sync(ctrl);
3172 if (ret < 0)
3173 return ret;
3174 return count;
3175}
3176static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3177
3178static ssize_t nvme_sysfs_rescan(struct device *dev,
3179 struct device_attribute *attr, const char *buf,
3180 size_t count)
3181{
3182 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3183
3184 nvme_queue_scan(ctrl);
3185 return count;
3186}
3187static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
3188
3189static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
3190{
3191 struct gendisk *disk = dev_to_disk(dev);
3192
3193 if (disk->fops == &nvme_bdev_ops)
3194 return nvme_get_ns_from_dev(dev)->head;
3195 else
3196 return disk->private_data;
3197}
3198
3199static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3200 char *buf)
3201{
3202 struct nvme_ns_head *head = dev_to_ns_head(dev);
3203 struct nvme_ns_ids *ids = &head->ids;
3204 struct nvme_subsystem *subsys = head->subsys;
3205 int serial_len = sizeof(subsys->serial);
3206 int model_len = sizeof(subsys->model);
3207
3208 if (!uuid_is_null(&ids->uuid))
3209 return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
3210
3211 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3212 return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
3213
3214 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3215 return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
3216
3217 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
3218 subsys->serial[serial_len - 1] == '\0'))
3219 serial_len--;
3220 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
3221 subsys->model[model_len - 1] == '\0'))
3222 model_len--;
3223
3224 return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
3225 serial_len, subsys->serial, model_len, subsys->model,
3226 head->ns_id);
3227}
3228static DEVICE_ATTR_RO(wwid);
3229
3230static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3231 char *buf)
3232{
3233 return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3234}
3235static DEVICE_ATTR_RO(nguid);
3236
3237static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3238 char *buf)
3239{
3240 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3241
 /*
  * For backward compatibility, expose the NGUID to userspace if
  * we have no UUID set.
  */
3245 if (uuid_is_null(&ids->uuid)) {
3246 printk_ratelimited(KERN_WARNING
 "No UUID available, providing old NGUID\n");
3248 return sysfs_emit(buf, "%pU\n", ids->nguid);
3249 }
3250 return sysfs_emit(buf, "%pU\n", &ids->uuid);
3251}
3252static DEVICE_ATTR_RO(uuid);
3253
3254static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3255 char *buf)
3256{
3257 return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3258}
3259static DEVICE_ATTR_RO(eui);
3260
3261static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3262 char *buf)
3263{
3264 return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3265}
3266static DEVICE_ATTR_RO(nsid);
3267
3268static struct attribute *nvme_ns_id_attrs[] = {
3269 &dev_attr_wwid.attr,
3270 &dev_attr_uuid.attr,
3271 &dev_attr_nguid.attr,
3272 &dev_attr_eui.attr,
3273 &dev_attr_nsid.attr,
3274#ifdef CONFIG_NVME_MULTIPATH
3275 &dev_attr_ana_grpid.attr,
3276 &dev_attr_ana_state.attr,
3277#endif
3278 NULL,
3279};
3280
3281static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3282 struct attribute *a, int n)
3283{
3284 struct device *dev = container_of(kobj, struct device, kobj);
3285 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3286
3287 if (a == &dev_attr_uuid.attr) {
3288 if (uuid_is_null(&ids->uuid) &&
3289 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3290 return 0;
3291 }
3292 if (a == &dev_attr_nguid.attr) {
3293 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3294 return 0;
3295 }
3296 if (a == &dev_attr_eui.attr) {
3297 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3298 return 0;
3299 }
3300#ifdef CONFIG_NVME_MULTIPATH
3301 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
3302 if (dev_to_disk(dev)->fops != &nvme_bdev_ops)
3303 return 0;
3304 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
3305 return 0;
3306 }
3307#endif
3308 return a->mode;
3309}
3310
3311static const struct attribute_group nvme_ns_id_attr_group = {
3312 .attrs = nvme_ns_id_attrs,
3313 .is_visible = nvme_ns_id_attrs_are_visible,
3314};
3315
3316const struct attribute_group *nvme_ns_id_attr_groups[] = {
3317 &nvme_ns_id_attr_group,
3318 NULL,
3319};
3320
3321#define nvme_show_str_function(field) \
3322static ssize_t field##_show(struct device *dev, \
3323 struct device_attribute *attr, char *buf) \
3324{ \
3325 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3326 return sysfs_emit(buf, "%.*s\n", \
3327 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
3328} \
3329static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3330
3331nvme_show_str_function(model);
3332nvme_show_str_function(serial);
3333nvme_show_str_function(firmware_rev);
3334
3335#define nvme_show_int_function(field) \
3336static ssize_t field##_show(struct device *dev, \
3337 struct device_attribute *attr, char *buf) \
3338{ \
3339 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3340 return sysfs_emit(buf, "%d\n", ctrl->field); \
3341} \
3342static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3343
3344nvme_show_int_function(cntlid);
3345nvme_show_int_function(numa_node);
3346nvme_show_int_function(queue_count);
3347nvme_show_int_function(sqsize);
3348nvme_show_int_function(kato);
3349
3350static ssize_t nvme_sysfs_delete(struct device *dev,
3351 struct device_attribute *attr, const char *buf,
3352 size_t count)
3353{
3354 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3355
3356 if (device_remove_file_self(dev, attr))
3357 nvme_delete_ctrl_sync(ctrl);
3358 return count;
3359}
3360static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
3361
3362static ssize_t nvme_sysfs_show_transport(struct device *dev,
3363 struct device_attribute *attr,
3364 char *buf)
3365{
3366 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3367
3368 return sysfs_emit(buf, "%s\n", ctrl->ops->name);
3369}
3370static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
3371
3372static ssize_t nvme_sysfs_show_state(struct device *dev,
3373 struct device_attribute *attr,
3374 char *buf)
3375{
3376 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3377 static const char *const state_name[] = {
3378 [NVME_CTRL_NEW] = "new",
3379 [NVME_CTRL_LIVE] = "live",
3380 [NVME_CTRL_RESETTING] = "resetting",
3381 [NVME_CTRL_CONNECTING] = "connecting",
3382 [NVME_CTRL_DELETING] = "deleting",
3383 [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
3384 [NVME_CTRL_DEAD] = "dead",
3385 };
3386
3387 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
3388 state_name[ctrl->state])
3389 return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
3390
3391 return sysfs_emit(buf, "unknown state\n");
3392}
3393
3394static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
3395
3396static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
3397 struct device_attribute *attr,
3398 char *buf)
3399{
3400 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3401
3402 return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
3403}
3404static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
3405
3406static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
3407 struct device_attribute *attr,
3408 char *buf)
3409{
3410 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3411
3412 return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
3413}
3414static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
3415
3416static ssize_t nvme_sysfs_show_hostid(struct device *dev,
3417 struct device_attribute *attr,
3418 char *buf)
3419{
3420 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3421
3422 return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
3423}
3424static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
3425
3426static ssize_t nvme_sysfs_show_address(struct device *dev,
3427 struct device_attribute *attr,
3428 char *buf)
3429{
3430 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3431
3432 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
3433}
3434static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
3435
3436static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
3437 struct device_attribute *attr, char *buf)
3438{
3439 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3440 struct nvmf_ctrl_options *opts = ctrl->opts;
3441
 if (opts->max_reconnects == -1)
3443 return sysfs_emit(buf, "off\n");
3444 return sysfs_emit(buf, "%d\n",
3445 opts->max_reconnects * opts->reconnect_delay);
3446}
3447
3448static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
3449 struct device_attribute *attr, const char *buf, size_t count)
3450{
3451 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3452 struct nvmf_ctrl_options *opts = ctrl->opts;
3453 int ctrl_loss_tmo, err;
3454
3455 err = kstrtoint(buf, 10, &ctrl_loss_tmo);
3456 if (err)
3457 return -EINVAL;
3458
3459 if (ctrl_loss_tmo < 0)
3460 opts->max_reconnects = -1;
3461 else
3462 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
3463 opts->reconnect_delay);
3464 return count;
3465}
3466static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
3467 nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
3468
3469static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
3470 struct device_attribute *attr, char *buf)
3471{
3472 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3473
3474 if (ctrl->opts->reconnect_delay == -1)
3475 return sysfs_emit(buf, "off\n");
3476 return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
3477}
3478
3479static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
3480 struct device_attribute *attr, const char *buf, size_t count)
3481{
3482 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3483 unsigned int v;
3484 int err;
3485
3486 err = kstrtou32(buf, 10, &v);
3487 if (err)
3488 return err;
3489
3490 ctrl->opts->reconnect_delay = v;
3491 return count;
3492}
3493static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
3494 nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
3495
3496static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
3497 struct device_attribute *attr, char *buf)
3498{
3499 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3500
3501 if (ctrl->opts->fast_io_fail_tmo == -1)
3502 return sysfs_emit(buf, "off\n");
3503 return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
3504}
3505
3506static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
3507 struct device_attribute *attr, const char *buf, size_t count)
3508{
3509 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3510 struct nvmf_ctrl_options *opts = ctrl->opts;
3511 int fast_io_fail_tmo, err;
3512
3513 err = kstrtoint(buf, 10, &fast_io_fail_tmo);
3514 if (err)
3515 return -EINVAL;
3516
3517 if (fast_io_fail_tmo < 0)
3518 opts->fast_io_fail_tmo = -1;
3519 else
3520 opts->fast_io_fail_tmo = fast_io_fail_tmo;
3521 return count;
3522}
3523static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
3524 nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
3525
3526static struct attribute *nvme_dev_attrs[] = {
3527 &dev_attr_reset_controller.attr,
3528 &dev_attr_rescan_controller.attr,
3529 &dev_attr_model.attr,
3530 &dev_attr_serial.attr,
3531 &dev_attr_firmware_rev.attr,
3532 &dev_attr_cntlid.attr,
3533 &dev_attr_delete_controller.attr,
3534 &dev_attr_transport.attr,
3535 &dev_attr_subsysnqn.attr,
3536 &dev_attr_address.attr,
3537 &dev_attr_state.attr,
3538 &dev_attr_numa_node.attr,
3539 &dev_attr_queue_count.attr,
3540 &dev_attr_sqsize.attr,
3541 &dev_attr_hostnqn.attr,
3542 &dev_attr_hostid.attr,
3543 &dev_attr_ctrl_loss_tmo.attr,
3544 &dev_attr_reconnect_delay.attr,
3545 &dev_attr_fast_io_fail_tmo.attr,
3546 &dev_attr_kato.attr,
3547 NULL
3548};
3549
3550static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
3551 struct attribute *a, int n)
3552{
3553 struct device *dev = container_of(kobj, struct device, kobj);
3554 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3555
3556 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
3557 return 0;
3558 if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
3559 return 0;
3560 if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
3561 return 0;
3562 if (a == &dev_attr_hostid.attr && !ctrl->opts)
3563 return 0;
3564 if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
3565 return 0;
3566 if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
3567 return 0;
3568 if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
3569 return 0;
3570
3571 return a->mode;
3572}
3573
3574static const struct attribute_group nvme_dev_attrs_group = {
3575 .attrs = nvme_dev_attrs,
3576 .is_visible = nvme_dev_attrs_are_visible,
3577};
3578
3579static const struct attribute_group *nvme_dev_attr_groups[] = {
3580 &nvme_dev_attrs_group,
3581 NULL,
3582};
3583
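/* Look up an existing namespace head by NSID and take a reference on it. */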
3584static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
3585 unsigned nsid)
3586{
3587 struct nvme_ns_head *h;
3588
3589 lockdep_assert_held(&subsys->lock);
3590
3591 list_for_each_entry(h, &subsys->nsheads, entry) {
3592 if (h->ns_id != nsid)
3593 continue;
3594 if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
3595 return h;
3596 }
3597
3598 return NULL;
3599}
3600
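/*
 * Make sure the new namespace head doesn't reuse the unique identifiers
 * (UUID/NGUID/EUI-64) of an existing head in the subsystem.
 */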
3601static int __nvme_check_ids(struct nvme_subsystem *subsys,
3602 struct nvme_ns_head *new)
3603{
3604 struct nvme_ns_head *h;
3605
3606 lockdep_assert_held(&subsys->lock);
3607
3608 list_for_each_entry(h, &subsys->nsheads, entry) {
3609 if (nvme_ns_ids_valid(&new->ids) &&
3610 nvme_ns_ids_equal(&new->ids, &h->ids))
3611 return -EINVAL;
3612 }
3613
3614 return 0;
3615}
3616
3617static void nvme_cdev_rel(struct device *dev)
3618{
3619 ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
3620}
3621
3622void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
3623{
3624 cdev_device_del(cdev, cdev_device);
3625 put_device(cdev_device);
3626}
3627
3628int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
3629 const struct file_operations *fops, struct module *owner)
3630{
3631 int minor, ret;
3632
3633 minor = ida_simple_get(&nvme_ns_chr_minor_ida, 0, 0, GFP_KERNEL);
3634 if (minor < 0)
3635 return minor;
3636 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
3637 cdev_device->class = nvme_ns_chr_class;
3638 cdev_device->release = nvme_cdev_rel;
3639 device_initialize(cdev_device);
3640 cdev_init(cdev, fops);
3641 cdev->owner = owner;
3642 ret = cdev_device_add(cdev, cdev_device);
3643 if (ret)
3644 put_device(cdev_device);
3645
3646 return ret;
3647}
3648
3649static int nvme_ns_chr_open(struct inode *inode, struct file *file)
3650{
3651 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
3652}
3653
3654static int nvme_ns_chr_release(struct inode *inode, struct file *file)
3655{
3656 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
3657 return 0;
3658}
3659
3660static const struct file_operations nvme_ns_chr_fops = {
3661 .owner = THIS_MODULE,
3662 .open = nvme_ns_chr_open,
3663 .release = nvme_ns_chr_release,
3664 .unlocked_ioctl = nvme_ns_chr_ioctl,
3665 .compat_ioctl = compat_ptr_ioctl,
3666};
3667
3668static int nvme_add_ns_cdev(struct nvme_ns *ns)
3669{
3670 int ret;
3671
3672 ns->cdev_device.parent = ns->ctrl->device;
3673 ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
3674 ns->ctrl->instance, ns->head->instance);
3675 if (ret)
3676 return ret;
3677
3678 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
3679 ns->ctrl->ops->module);
3680}
3681
3682static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3683 unsigned nsid, struct nvme_ns_ids *ids)
3684{
3685 struct nvme_ns_head *head;
3686 size_t size = sizeof(*head);
3687 int ret = -ENOMEM;
3688
3689#ifdef CONFIG_NVME_MULTIPATH
3690 size += num_possible_nodes() * sizeof(struct nvme_ns *);
3691#endif
3692
3693 head = kzalloc(size, GFP_KERNEL);
3694 if (!head)
3695 goto out;
3696 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3697 if (ret < 0)
3698 goto out_free_head;
3699 head->instance = ret;
3700 INIT_LIST_HEAD(&head->list);
3701 ret = init_srcu_struct(&head->srcu);
3702 if (ret)
3703 goto out_ida_remove;
3704 head->subsys = ctrl->subsys;
3705 head->ns_id = nsid;
3706 head->ids = *ids;
3707 kref_init(&head->ref);
3708
3709 ret = __nvme_check_ids(ctrl->subsys, head);
3710 if (ret) {
3711 dev_err(ctrl->device,
3712 "duplicate IDs for nsid %d\n", nsid);
3713 goto out_cleanup_srcu;
3714 }
3715
3716 if (head->ids.csi) {
3717 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
3718 if (ret)
3719 goto out_cleanup_srcu;
3720 } else
3721 head->effects = ctrl->effects;
3722
3723 ret = nvme_mpath_alloc_disk(ctrl, head);
3724 if (ret)
3725 goto out_cleanup_srcu;
3726
3727 list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3728
3729 kref_get(&ctrl->subsys->ref);
3730
3731 return head;
3732out_cleanup_srcu:
3733 cleanup_srcu_struct(&head->srcu);
3734out_ida_remove:
3735 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3736out_free_head:
3737 kfree(head);
3738out:
3739 if (ret > 0)
3740 ret = blk_status_to_errno(nvme_error_status(ret));
3741 return ERR_PTR(ret);
3742}
3743
3744static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3745 struct nvme_ns_ids *ids, bool is_shared)
3746{
3747 struct nvme_ctrl *ctrl = ns->ctrl;
3748 struct nvme_ns_head *head = NULL;
3749 int ret = 0;
3750
3751 mutex_lock(&ctrl->subsys->lock);
3752 head = nvme_find_ns_head(ctrl->subsys, nsid);
3753 if (!head) {
3754 head = nvme_alloc_ns_head(ctrl, nsid, ids);
3755 if (IS_ERR(head)) {
3756 ret = PTR_ERR(head);
3757 goto out_unlock;
3758 }
3759 head->shared = is_shared;
3760 } else {
3761 ret = -EINVAL;
3762 if (!is_shared || !head->shared) {
3763 dev_err(ctrl->device,
3764 "Duplicate unshared namespace %d\n", nsid);
3765 goto out_put_ns_head;
3766 }
3767 if (!nvme_ns_ids_equal(&head->ids, ids)) {
3768 dev_err(ctrl->device,
3769 "IDs don't match for shared namespace %d\n",
3770 nsid);
3771 goto out_put_ns_head;
3772 }
3773 }
3774
3775 list_add_tail_rcu(&ns->siblings, &head->list);
3776 ns->head = head;
3777 mutex_unlock(&ctrl->subsys->lock);
3778 return 0;
3779
3780out_put_ns_head:
3781 nvme_put_ns_head(head);
3782out_unlock:
3783 mutex_unlock(&ctrl->subsys->lock);
3784 return ret;
3785}
3786
3787struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3788{
3789 struct nvme_ns *ns, *ret = NULL;
3790
3791 down_read(&ctrl->namespaces_rwsem);
3792 list_for_each_entry(ns, &ctrl->namespaces, list) {
3793 if (ns->head->ns_id == nsid) {
3794 if (!nvme_get_ns(ns))
3795 continue;
3796 ret = ns;
3797 break;
3798 }
3799 if (ns->head->ns_id > nsid)
3800 break;
3801 }
3802 up_read(&ctrl->namespaces_rwsem);
3803 return ret;
3804}
3805EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
3806
/*
 * Add the namespace to the controller list while keeping the list ordered.
 */
3810static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
3811{
3812 struct nvme_ns *tmp;
3813
3814 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
3815 if (tmp->head->ns_id < ns->head->ns_id) {
3816 list_add(&ns->list, &tmp->list);
3817 return;
3818 }
3819 }
3820 list_add(&ns->list, &ns->ctrl->namespaces);
3821}
3822
3823static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
3824 struct nvme_ns_ids *ids)
3825{
3826 struct nvme_ns *ns;
3827 struct gendisk *disk;
3828 struct nvme_id_ns *id;
3829 int node = ctrl->numa_node;
3830
3831 if (nvme_identify_ns(ctrl, nsid, ids, &id))
3832 return;
3833
3834 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3835 if (!ns)
3836 goto out_free_id;
3837
3838 disk = blk_mq_alloc_disk(ctrl->tagset, ns);
3839 if (IS_ERR(disk))
3840 goto out_free_ns;
3841 disk->fops = &nvme_bdev_ops;
3842 disk->private_data = ns;
3843
3844 ns->disk = disk;
3845 ns->queue = disk->queue;
3846
3847 if (ctrl->opts && ctrl->opts->data_digest)
3848 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
3849
3850 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3851 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
3852 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3853
3854 ns->ctrl = ctrl;
3855 kref_init(&ns->kref);
3856
3857 if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
3858 goto out_cleanup_disk;
3859
 /*
  * Without the multipath code enabled, multiple controllers per
  * subsystem are visible as devices and thus we cannot use the
  * subsystem instance.
  */
3865 if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
3866 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
3867 ns->head->instance);
3868
3869 if (nvme_update_ns_info(ns, id))
3870 goto out_unlink_ns;
3871
3872 down_write(&ctrl->namespaces_rwsem);
3873 nvme_ns_add_to_ctrl_list(ns);
3874 up_write(&ctrl->namespaces_rwsem);
3875 nvme_get_ctrl(ctrl);
3876
3877 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
3878 goto out_cleanup_ns_from_list;
3879
3880 if (!nvme_ns_head_multipath(ns->head))
3881 nvme_add_ns_cdev(ns);
3882
3883 nvme_mpath_add_disk(ns, id);
3884 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3885 kfree(id);
3886
3887 return;
3888
3889 out_cleanup_ns_from_list:
3890 nvme_put_ctrl(ctrl);
3891 down_write(&ctrl->namespaces_rwsem);
3892 list_del_init(&ns->list);
3893 up_write(&ctrl->namespaces_rwsem);
3894 out_unlink_ns:
3895 mutex_lock(&ctrl->subsys->lock);
3896 list_del_rcu(&ns->siblings);
3897 if (list_empty(&ns->head->list))
3898 list_del_init(&ns->head->entry);
3899 mutex_unlock(&ctrl->subsys->lock);
3900 nvme_put_ns_head(ns->head);
3901 out_cleanup_disk:
3902 blk_cleanup_disk(disk);
3903 out_free_ns:
3904 kfree(ns);
3905 out_free_id:
3906 kfree(id);
3907}
3908
3909static void nvme_ns_remove(struct nvme_ns *ns)
3910{
3911 bool last_path = false;
3912
3913 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3914 return;
3915
3916 clear_bit(NVME_NS_READY, &ns->flags);
3917 set_capacity(ns->disk, 0);
3918 nvme_fault_inject_fini(&ns->fault_inject);
3919
3920 mutex_lock(&ns->ctrl->subsys->lock);
3921 list_del_rcu(&ns->siblings);
3922 if (list_empty(&ns->head->list)) {
3923 list_del_init(&ns->head->entry);
3924 last_path = true;
3925 }
3926 mutex_unlock(&ns->ctrl->subsys->lock);
3927
 /* Guarantee the namespace is no longer visible in head->list. */
3929 synchronize_rcu();
3930
 /* Wait for concurrent submissions to finish. */
3932 if (nvme_mpath_clear_current_path(ns))
3933 synchronize_srcu(&ns->head->srcu);
3934
3935 if (!nvme_ns_head_multipath(ns->head))
3936 nvme_cdev_del(&ns->cdev, &ns->cdev_device);
3937 del_gendisk(ns->disk);
3938 blk_cleanup_queue(ns->queue);
3939
3940 down_write(&ns->ctrl->namespaces_rwsem);
3941 list_del_init(&ns->list);
3942 up_write(&ns->ctrl->namespaces_rwsem);
3943
3944 if (last_path)
3945 nvme_mpath_shutdown_disk(ns->head);
3946 nvme_put_ns(ns);
3947}
3948
3949static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
3950{
3951 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3952
3953 if (ns) {
3954 nvme_ns_remove(ns);
3955 nvme_put_ns(ns);
3956 }
3957}
3958
3959static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
3960{
3961 struct nvme_id_ns *id;
3962 int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
3963
3964 if (test_bit(NVME_NS_DEAD, &ns->flags))
3965 goto out;
3966
3967 ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
3968 if (ret)
3969 goto out;
3970
3971 ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
3972 if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
3973 dev_err(ns->ctrl->device,
3974 "identifiers changed for nsid %d\n", ns->head->ns_id);
3975 goto out_free_id;
3976 }
3977
3978 ret = nvme_update_ns_info(ns, id);
3979
3980out_free_id:
3981 kfree(id);
3982out:
 /*
  * Only remove the namespace if we got a fatal error back from the
  * device, otherwise ignore the error and just move on.
  *
  * TODO: we should probably schedule a delayed retry here.
  */
3989 if (ret > 0 && (ret & NVME_SC_DNR))
3990 nvme_ns_remove(ns);
3991}
3992
3993static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3994{
3995 struct nvme_ns_ids ids = { };
3996 struct nvme_ns *ns;
3997
3998 if (nvme_identify_ns_descs(ctrl, nsid, &ids))
3999 return;
4000
4001 ns = nvme_find_get_ns(ctrl, nsid);
4002 if (ns) {
4003 nvme_validate_ns(ns, &ids);
4004 nvme_put_ns(ns);
4005 return;
4006 }
4007
4008 switch (ids.csi) {
4009 case NVME_CSI_NVM:
4010 nvme_alloc_ns(ctrl, nsid, &ids);
4011 break;
4012 case NVME_CSI_ZNS:
4013 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
4014 dev_warn(ctrl->device,
4015 "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
4016 nsid);
4017 break;
4018 }
4019 if (!nvme_multi_css(ctrl)) {
4020 dev_warn(ctrl->device,
4021 "command set not reported for nsid: %d\n",
4022 nsid);
4023 break;
4024 }
4025 nvme_alloc_ns(ctrl, nsid, &ids);
4026 break;
4027 default:
4028 dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
4029 ids.csi, nsid);
4030 break;
4031 }
4032}
4033
4034static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
4035 unsigned nsid)
4036{
4037 struct nvme_ns *ns, *next;
4038 LIST_HEAD(rm_list);
4039
4040 down_write(&ctrl->namespaces_rwsem);
4041 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
4042 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
4043 list_move_tail(&ns->list, &rm_list);
4044 }
4045 up_write(&ctrl->namespaces_rwsem);
4046
4047 list_for_each_entry_safe(ns, next, &rm_list, list)
4048 nvme_ns_remove(ns);
4050}
4051
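/*
 * Scan namespaces using the Identify Active Namespace ID list (CNS 02h),
 * walking the list in 1024-entry chunks and pruning namespaces that fall
 * into the gaps between reported NSIDs.
 */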
4052static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
4053{
4054 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
4055 __le32 *ns_list;
4056 u32 prev = 0;
4057 int ret = 0, i;
4058
4059 if (nvme_ctrl_limited_cns(ctrl))
4060 return -EOPNOTSUPP;
4061
4062 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
4063 if (!ns_list)
4064 return -ENOMEM;
4065
4066 for (;;) {
4067 struct nvme_command cmd = {
4068 .identify.opcode = nvme_admin_identify,
4069 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST,
4070 .identify.nsid = cpu_to_le32(prev),
4071 };
4072
4073 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
4074 NVME_IDENTIFY_DATA_SIZE);
4075 if (ret) {
4076 dev_warn(ctrl->device,
4077 "Identify NS List failed (status=0x%x)\n", ret);
4078 goto free;
4079 }
4080
4081 for (i = 0; i < nr_entries; i++) {
4082 u32 nsid = le32_to_cpu(ns_list[i]);
4083
4084 if (!nsid)
4085 goto out;
4086 nvme_validate_or_alloc_ns(ctrl, nsid);
4087 while (++prev < nsid)
4088 nvme_ns_remove_by_nsid(ctrl, prev);
4089 }
4090 }
4091 out:
4092 nvme_remove_invalid_namespaces(ctrl, prev);
4093 free:
4094 kfree(ns_list);
4095 return ret;
4096}
4097
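/*
 * Fallback for controllers that can't handle the Identify NS list: probe
 * every NSID from 1 up to the reported number of namespaces (NN).
 */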
4098static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
4099{
4100 struct nvme_id_ctrl *id;
4101 u32 nn, i;
4102
4103 if (nvme_identify_ctrl(ctrl, &id))
4104 return;
4105 nn = le32_to_cpu(id->nn);
4106 kfree(id);
4107
4108 for (i = 1; i <= nn; i++)
4109 nvme_validate_or_alloc_ns(ctrl, i);
4110
4111 nvme_remove_invalid_namespaces(ctrl, nn);
4112}
4113
4114static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
4115{
4116 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
4117 __le32 *log;
4118 int error;
4119
4120 log = kzalloc(log_size, GFP_KERNEL);
4121 if (!log)
4122 return;
4123
 /*
  * We need to read the log to clear the AEN, but we don't want to
  * rely on it for the changed namespace information, as userspace
  * could have raced with us reading the log page, which would cause
  * us to miss updates.
  */
4130 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
4131 NVME_CSI_NVM, log, log_size, 0);
4132 if (error)
4133 dev_warn(ctrl->device,
4134 "reading changed ns log failed: %d\n", error);
4135
4136 kfree(log);
4137}
4138
4139static void nvme_scan_work(struct work_struct *work)
4140{
4141 struct nvme_ctrl *ctrl =
4142 container_of(work, struct nvme_ctrl, scan_work);
4143
 /* No tagset on a live ctrl means IO queues could not be created. */
4145 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
4146 return;
4147
4148 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
4149 dev_info(ctrl->device, "rescanning namespaces.\n");
4150 nvme_clear_changed_ns_log(ctrl);
4151 }
4152
4153 mutex_lock(&ctrl->scan_lock);
4154 if (nvme_scan_ns_list(ctrl) != 0)
4155 nvme_scan_ns_sequential(ctrl);
4156 mutex_unlock(&ctrl->scan_lock);
4157}
4158
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure.  It is up to the caller to ensure the namespace list
 * is not modified by scan work while this function is executing.
 */
4164void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
4165{
4166 struct nvme_ns *ns, *next;
4167 LIST_HEAD(ns_list);
4168
 /*
  * Make sure to requeue I/O to all namespaces, as these might
  * result from the scan itself and must complete for the
  * scan_work to make progress.
  */
4174 nvme_mpath_clear_ctrl_paths(ctrl);
4175
 /* Prevent racing with ns scanning. */
4177 flush_work(&ctrl->scan_work);
4178
 /*
  * The NVME_CTRL_DEAD state indicates that the controller was not
  * gracefully disconnected.  In that case we won't be able to
  * flush any data while removing the namespaces' disks, so fail
  * all the queues now to avoid deadlocks.
  */
4185 if (ctrl->state == NVME_CTRL_DEAD)
4186 nvme_kill_queues(ctrl);
4187
 /* This is a no-op when called from the controller reset handler. */
4189 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
4190
4191 down_write(&ctrl->namespaces_rwsem);
4192 list_splice_init(&ctrl->namespaces, &ns_list);
4193 up_write(&ctrl->namespaces_rwsem);
4194
4195 list_for_each_entry_safe(ns, next, &ns_list, list)
4196 nvme_ns_remove(ns);
4197}
4198EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
4199
4200static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
4201{
4202 struct nvme_ctrl *ctrl =
4203 container_of(dev, struct nvme_ctrl, ctrl_device);
4204 struct nvmf_ctrl_options *opts = ctrl->opts;
4205 int ret;
4206
4207 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
4208 if (ret)
4209 return ret;
4210
4211 if (opts) {
4212 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
4213 if (ret)
4214 return ret;
4215
4216 ret = add_uevent_var(env, "NVME_TRSVCID=%s",
4217 opts->trsvcid ?: "none");
4218 if (ret)
4219 return ret;
4220
4221 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
4222 opts->host_traddr ?: "none");
4223 if (ret)
4224 return ret;
4225
4226 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
4227 opts->host_iface ?: "none");
4228 }
4229 return ret;
4230}
4231
4232static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
4233{
4234 char *envp[2] = { NULL, NULL };
4235 u32 aen_result = ctrl->aen_result;
4236
4237 ctrl->aen_result = 0;
4238 if (!aen_result)
4239 return;
4240
4241 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
4242 if (!envp[0])
4243 return;
4244 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4245 kfree(envp[0]);
4246}
4247
4248static void nvme_async_event_work(struct work_struct *work)
4249{
4250 struct nvme_ctrl *ctrl =
4251 container_of(work, struct nvme_ctrl, async_event_work);
4252
4253 nvme_aen_uevent(ctrl);
4254
 /*
  * The transport drivers must guarantee AER submission here is
  * safe by flushing ctrl async_event_work after changing the
  * controller state from LIVE and before freeing the admin queue.
  */
4260 if (ctrl->state == NVME_CTRL_LIVE)
4261 ctrl->ops->submit_async_event(ctrl);
4262}
4263
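/* Check CSTS.PP: the controller pauses processing during firmware activation. */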
4264static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
4265{
4267 u32 csts;
4268
4269 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
4270 return false;
4271
4272 if (csts == ~0)
4273 return false;
4274
4275 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
4276}
4277
4278static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
4279{
4280 struct nvme_fw_slot_info_log *log;
4281
4282 log = kmalloc(sizeof(*log), GFP_KERNEL);
4283 if (!log)
4284 return;
4285
4286 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
4287 log, sizeof(*log), 0))
4288 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4289 kfree(log);
4290}
4291
4292static void nvme_fw_act_work(struct work_struct *work)
4293{
4294 struct nvme_ctrl *ctrl = container_of(work,
4295 struct nvme_ctrl, fw_act_work);
4296 unsigned long fw_act_timeout;
4297
4298 if (ctrl->mtfa)
4299 fw_act_timeout = jiffies +
4300 msecs_to_jiffies(ctrl->mtfa * 100);
4301 else
4302 fw_act_timeout = jiffies +
4303 msecs_to_jiffies(admin_timeout * 1000);
4304
4305 nvme_stop_queues(ctrl);
4306 while (nvme_ctrl_pp_status(ctrl)) {
4307 if (time_after(jiffies, fw_act_timeout)) {
4308 dev_warn(ctrl->device,
4309 "Fw activation timeout, reset controller\n");
4310 nvme_try_sched_reset(ctrl);
4311 return;
4312 }
4313 msleep(100);
4314 }
4315
4316 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4317 return;
4318
4319 nvme_start_queues(ctrl);
4320
4321 nvme_get_fw_slot_info(ctrl);
4322}

static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = (result & 0xff00) >> 8;

	trace_nvme_async_event(ctrl, aer_notice_type);

	switch (aer_notice_type) {
	case NVME_AER_NOTICE_NS_CHANGED:
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = result & 0x07;

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (aer_type) {
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		trace_nvme_async_event(ctrl, aer_type);
		ctrl->aen_result = result;
		break;
	default:
		break;
	}
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
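
/*
 * Worked example (values assumed for illustration): a completion with
 * result 0x00000102 decodes as aer_type = 0x102 & 0x07 = 0x2
 * (NVME_AER_NOTICE) and, in nvme_handle_aen_notice(), aer_notice_type =
 * (0x102 & 0xff00) >> 8 = 0x01 (NVME_AER_NOTICE_FW_ACT_STARTING), which
 * queues the firmware activation work above.
 */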

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	nvme_stop_failfast_work(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_start_keep_alive(ctrl);

	nvme_enable_aen(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_hwmon_exit(ctrl);
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
	nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_cels(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log *cel;
	unsigned long i;

	xa_for_each(&ctrl->cels, i, cel) {
		xa_erase(&ctrl->cels, i);
		kfree(cel);
	}

	xa_destroy(&ctrl->cels);
}

static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (!subsys || ctrl->instance != subsys->instance)
		ida_simple_remove(&nvme_instance_ida, ctrl->instance);

	nvme_free_cels(ctrl);
	nvme_mpath_uninit(ctrl);
	__free_page(ctrl->discard_page);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}

/*
 * Initialize an NVMe controller structure.  This needs to be called as
 * early as possible during driver initialization so that the structure is
 * fully set up before probing starts.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	ctrl->numa_node = NUMA_NO_NODE;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
			ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
	nvme_mpath_init_ctrl(ctrl);

	return 0;
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
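
/*
 * Minimal usage sketch (hypothetical, not part of this file): a transport
 * driver pairs nvme_init_ctrl() with nvme_uninit_ctrl()/nvme_put_ctrl().
 * The my_ctrl container, my_probe(), and my_ctrl_ops names are assumptions
 * for illustration; real transports (PCIe, TCP, RDMA, FC) follow this shape.
 */
#if 0
struct my_ctrl {
	struct nvme_ctrl ctrl;	/* must be embedded, not pointed to */
	/* transport-private state ... */
};

static int my_probe(struct device *dev)
{
	struct my_ctrl *mctrl;
	int ret;

	mctrl = kzalloc(sizeof(*mctrl), GFP_KERNEL);
	if (!mctrl)
		return -ENOMEM;

	ret = nvme_init_ctrl(&mctrl->ctrl, dev, &my_ctrl_ops, 0 /* quirks */);
	if (ret) {
		kfree(mctrl);
		return ret;
	}

	/* ... set up queues, then nvme_start_ctrl(&mctrl->ctrl) ... */
	return 0;
}
#endif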

static void nvme_start_ns_queue(struct nvme_ns *ns)
{
	if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
		blk_mq_unquiesce_queue(ns->queue);
}

static void nvme_stop_ns_queue(struct nvme_ns *ns)
{
	/* NVME_NS_STOPPED keeps quiesce/unquiesce calls balanced */
	if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
		blk_mq_quiesce_queue(ns->queue);
	else
		blk_mq_wait_quiesce_done(ns->queue);
}

/*
 * Prepare a queue for teardown.
 *
 * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
 * the capacity to 0 after that to avoid blocking dispatchers that may be
 * holding bd_mutex.  This will end buffered writers dirtying pages that can't
 * be synced.
 */
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;

	blk_mark_disk_dead(ns->disk);
	nvme_start_ns_queue(ns);

	set_capacity_and_notify(ns->disk, 0);
}

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		nvme_start_admin_queue(ctrl);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
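
/*
 * Illustrative reset-path sketch (assumed shape, modeled on how transports
 * use these helpers; my_reset_prepare is a hypothetical name): freeze the
 * queues so new I/O blocks, bound the wait, quiesce dispatch while the
 * queues are torn down and re-created, then restart and unfreeze.
 */
#if 0
static void my_reset_prepare(struct nvme_ctrl *ctrl)
{
	nvme_start_freeze(ctrl);
	/* returns the remaining timeout; <= 0 means not all queues froze */
	nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT);
	nvme_stop_queues(ctrl);
	/* ... tear down and re-create the I/O queues here ... */
	nvme_start_queues(ctrl);
	nvme_unfreeze(ctrl);
}
#endif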

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_stop_ns_queue(ns);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_start_ns_queue(ns);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
{
	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_quiesce_queue(ctrl->admin_q);
	else
		blk_mq_wait_quiesce_done(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);

void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
{
	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_start_admin_queue);

void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);

struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
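
/*
 * Usage note (assumed, based on the export namespace): code importing
 * NVME_TARGET_PASSTHRU, such as the target passthru support, uses this
 * helper to resolve a controller from a user-supplied file descriptor; a
 * NULL return means the file is not an nvme controller character device.
 */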

/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}

static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}

	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
				     "nvme-generic");
	if (result < 0)
		goto destroy_subsys_class;

	nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic");
	if (IS_ERR(nvme_ns_chr_class)) {
		result = PTR_ERR(nvme_ns_chr_class);
		goto unregister_generic_ns;
	}

	return 0;

unregister_generic_ns:
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
	class_destroy(nvme_subsys_class);
destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}

static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_ns_chr_class);
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_ns_chr_minor_ida);
	ida_destroy(&nvme_instance_ida);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);